##// END OF EJS Templates
hg: make `clean` return consistent with the `update` function...
marmoute -
r47495:af753524 default
parent child Browse files
Show More
@@ -1,1533 +1,1534 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 pycompat,
44 pycompat,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import hashutil
56 from .utils import hashutil
57
57
# Convenience alias: callers may use hg.release() to release locks.
release = lock.release

# shared features: names of data items a share can opt into sharing with
# its source (consumed by share() via the 'shareditems' createopt).
sharedbookmarks = b'bookmarks'
62
62
63
63
def _local(path):
    """Return the repository class module handling ``path``.

    ``bundlerepo`` when the path names a regular file (a bundle),
    ``localrepo`` otherwise. Raises ``error.Abort`` for invalid paths.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )
    except OSError:
        isfile = False

    # conditional expression replaces the legacy `x and a or b` idiom,
    # which is error-prone when `a` can be falsy (safe here, but unclear)
    return bundlerepo if isfile else localrepo
83
83
84
84
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against ``other`` and extend ``revs``.

    ``branches`` is a ``(hashbranch, branchlist)`` pair as produced by
    parseurl(). Returns ``(revs, checkout)`` where ``checkout`` is the
    first resolved revision (or None when nothing was requested).
    """
    # a courtesy to callers using a localrepo for other
    peer = other.peer()
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested; pass revs through unchanged
        checkout = revs[0] if revs else None
        return revs or None, checkout
    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' means the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a branch name or a raw revision hash
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
127
127
128
128
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    branch = None
    if u.fragment:
        # the fragment names a branch; strip it from the URL proper
        branch, u.fragment = u.fragment, None
    return bytes(u), (branch, branches or [])
138
138
139
139
# Map of URL scheme -> module (or factory) used to open repositories
# addressed through that scheme; b'file' doubles as the fallback handler
# (see _peerlookup below).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
149
150
150
def _peerlookup(path):
    """Return the scheme handler for ``path`` (a repo module or instance)."""
    parsed = util.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
163
163
164
164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object; ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal() -> treat as remote
        return False
173
173
174
174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
182
182
183
183
# Functions taking (ui, peer), run by _peerorrepo() on every peer that has
# no local repository (i.e. wire peers) right after instantiation.
wirepeersetupfuncs = []
186
186
187
187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instance's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    for setupfunc in presetupfuncs or []:
        setupfunc(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: run the registered wire-peer initializers
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
214
214
215
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repoobj = peer.local()
    if not repoobj:
        # caller asked for a repository, but the path is remote-only
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repoobj.filtered(b'visible')
239
239
240
240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247
247
248
248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = util.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
269
269
270
270
def sharedreposource(repo):
    """Return the repository object for the source of a shared repo.

    Returns None when ``repo`` is not a shared repository.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    # return the instance cached by a previous call, if any
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    sourceurl, branches = parseurl(sourcepath)
    srcrepo = repository(repo.ui, sourceurl)
    repo.srcrepo = srcrepo  # cache for next time
    return srcrepo
288
288
289
289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # destination: explicit (user-expanded) or derived from the source name
    dest = ui.expandpath(dest) if dest else defaultdest(source)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # reopen so the fresh share picks up its configuration
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
337
337
338
338
def _prependsourcehgrc(repo):
    """Copy the source repo's config and prepend it to this repo's .hg/hgrc.

    Used on unshare; only meaningful when the share was performed with the
    share-safe method, where the source's config was shared.
    """
    sharedvfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sharedvfs.exists(b'hgrc'):
        # nothing to copy over
        return

    existingconfig = localvfs.read(b'hgrc') if localvfs.exists(b'hgrc') else b''

    with localvfs(b'hgrc', b'wb') as fp:
        sourceconfig = sharedvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(existingconfig)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the pointer to the share source
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    ctx = newrepo[b'.']
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default pull/push path
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
427 def _postshareupdate(repo, update, checkout=None):
427 def _postshareupdate(repo, update, checkout=None):
428 """Maybe perform a working directory update after a shared repo is created.
428 """Maybe perform a working directory update after a shared repo is created.
429
429
430 ``update`` can be a boolean or a revision to update to.
430 ``update`` can be a boolean or a revision to update to.
431 """
431 """
432 if not update:
432 if not update:
433 return
433 return
434
434
435 repo.ui.status(_(b"updating working directory\n"))
435 repo.ui.status(_(b"updating working directory\n"))
436 if update is not True:
436 if update is not True:
437 checkout = update
437 checkout = update
438 for test in (checkout, b'default', b'tip'):
438 for test in (checkout, b'default', b'tip'):
439 if test is None:
439 if test is None:
440 continue
440 continue
441 try:
441 try:
442 uprev = repo.lookup(test)
442 uprev = repo.lookup(test)
443 break
443 break
444 except error.RepoLookupError:
444 except error.RepoLookupError:
445 continue
445 continue
446 _update(repo, uprev)
446 _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unknown; util.copyfiles decides and reports back
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            total = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for relpath in srcrepo.store.copylist():
                if srcpublishing and relpath.endswith(b'phaseroots'):
                    # publishing repos do not carry phase data over
                    continue
                dstbase = os.path.dirname(relpath)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(relpath):
                    continue
                if relpath.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, copied = util.copyfiles(
                    srcvfs.join(relpath), dstvfs.join(relpath), hardlink, progress
                )
                total += copied
            if hardlink:
                ui.debug(b"linked %d files\n" % total)
            else:
                ui.debug(b"copied %d files\n" % total)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        resolved = []
        for revspec in rev:
            with srcpeer.commandexecutor() as executor:
                resolved.append(
                    executor.callcommand(
                        b'lookup',
                        {
                            b'key': revspec,
                        },
                    ).result()
                )
        revs = resolved

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        existing = os.path.exists(sharepath)
        if existing:
            msg = _(b'(sharing from existing pooled repository %s)\n')
        else:
            msg = _(b'(sharing from new pooled repository %s)\n')
        ui.status(msg % basename)
        if not existing:
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
597 # Recomputing caches is often slow on big repos, so copy them.
597 # Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # nothing to copy; a missing cache is simply recomputed later
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
606
606
607
607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    # `cleandir` names a directory to delete in the finally block if the
    # clone fails partway; it is reset to None once the clone succeeds.
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = ui.expandpath(dest)

        dest = util.urllocalpath(dest)
        source = util.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        # refuse to clone onto an existing file or non-empty directory
        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                # pooled-storage clone takes over entirely from here
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # A local, copiable, secret-free source may have its store copied
        # directly (see copystore below) instead of being pulled, unless the
        # caller asked for a pull or a partial (revs/narrow) clone.
        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                # fall back to a regular pull-based clone rather than wait
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        # None lets the server decide whether to stream-clone
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # the clone succeeded; don't delete the destination in `finally`
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # record the source as the default path in the new repo's hgrc
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # no explicit checkout: prefer the active bookmark, then
                    # '@', then the tip of the default branch, then tip
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        # release locks, remove any partially created destination, and close
        # peers we are not handing back to the caller
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066
1066
1067
1067
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* to the repo's ui.

    With quietempty=True, nothing is printed when all counters are zero.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1083
1083
1084
1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)

    Deprecated: prefer merge.update() or merge.clean_update().
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # delegate to the merge module; overwrite maps to a forced update
    update_kwargs = dict(
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
    return mergemod._update(repo, node, **update_kwargs)
1103
1103
1104
1104
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True if any file merges were left unresolved, False otherwise.
    """
    mergestats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, mergestats, quietempty)
    unresolved = mergestats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1112
1112
1113
1113
# naming conflict in clone(): its `update` parameter shadows the
# module-level update() function, so keep an alias for internal use
_update = update
1116
1116
1117
1117
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False ("no conflicts"), mirroring update()'s convention
    of returning whether unresolved file merges remain.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave files unresolved
    assert not stats.unresolvedcount
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1124
1125
1125
1126
# naming conflict in updatetotally(): its `clean` parameter shadows the
# module-level clean() function, so keep an alias for internal use
_clean = clean
1128
1129
# The set of `updatecheck` policies accepted by updatetotally(); any other
# configured value falls back to UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1135
1136
1136
1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # not configured, or an invalid value was configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and/or activate)
            checkout, movemarkfrom, brev = destutil.destupdate(
                repo, clean=clean
            )
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                # the dirty-check already happened above, so the actual
                # update can proceed unchecked
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                mark = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % mark)
            else:
                # this can happen with a non-linear update
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                mark = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % mark)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1215
1216
1216
1217
def merge(ctx, force=False, remind=True, labels=None):
    """Merge *ctx* into the working directory, resolving what it can.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    mergestats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, mergestats)
    if mergestats.unresolvedcount:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return mergestats.unresolvedcount > 0
1238
1239
1239
1240
def abortmerge(ui, repo):
    """Abandon an in-progress merge and update back to the local side."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # conflicts were recorded, so go back to the merge's local side
        node = ms.localctx.hex()
    else:
        # no conflicts, so no merge state was stored; use the wdir parent
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update can never leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1253
1254
1254
1255
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    # cleanup callable may be swapped out by getremotechanges() below
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1290
1291
1291
1292
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from *source*.

    Returns 0 when incoming changes were found, otherwise the result of
    recursing into subrepositories (1 when nothing was found anywhere).
    """

    def subreporecurse():
        # any subrepo with incoming changes makes the overall result 0
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            # --no-merges: skip changesets with two real parents
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1317
1318
1318
1319
def _outgoing(ui, repo, dest, opts):
    """Return (missing_nodes, other_peer) for the given push destination.

    Raises Abort when no destination is configured. On error the peer is
    closed before re-raising; on success the caller owns closing it.
    """
    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    try:
        outgoing = discovery.findcommonoutgoing(
            repo, other, revs, force=opts.get(b'force')
        )
        missing = outgoing.missing
        if not missing:
            scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
        return missing, other
    except:  # re-raises
        other.close()
        raise
1346
1347
1347
1348
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination *dest*.

    Returns 0 when outgoing changes were found, otherwise the result of
    recursing into subrepositories (1 when nothing was found anywhere).
    """

    def recurse():
        # any subrepo with outgoing changes makes the overall result 0
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    try:
        if not o:
            cmdutil.outgoinghooks(ui, repo, other, opts, o)
            return recurse()

        if opts.get(b'newest_first'):
            o.reverse()
        ui.pager(b'outgoing')
        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
        shown = 0
        for n in o:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in repo.changelog.parents(n) if p != nullid]
            # --no-merges: skip changesets with two real parents
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(repo[n])
        displayer.close()
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        recurse()
        return 0  # exit code is zero since we found outgoing changes
    finally:
        other.close()
1384
1385
1385
1386
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any earlier failure sticky in `ret`
                        sub_ret = ctx.sub(subpath, allowcreate=False).verify()
                        ret = sub_ret or ret
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1418
1419
1419
1420
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for opt in b'ssh', b'remotecmd':
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for section in (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    ):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1448
1449
1449
1450
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1459
1460
1460
1461
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # Compare mtimes and sizes of a few well-known files to decide
        # whether the repo changed. This is imprecise: mtimes suffer from
        # clock skew and coarse filesystem timestamps, and file content can
        # change while keeping the same size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return a (state, max_mtime) snapshot of the files of interest."""
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            newest = max(newest, st[stat.ST_MTIME])

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now