##// END OF EJS Templates
hg: convert an exception to bytes in the repo creation exception handler...
Matt Harbison -
r47518:1099541b stable
parent child Browse files
Show More
@@ -1,1514 +1,1517
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 pycompat,
45 requirements,
44 requirements,
46 scmutil,
45 scmutil,
47 sshpeer,
46 sshpeer,
48 statichttprepo,
47 statichttprepo,
49 ui as uimod,
48 ui as uimod,
50 unionrepo,
49 unionrepo,
51 url,
50 url,
52 util,
51 util,
53 verify as verifymod,
52 verify as verifymod,
54 vfs as vfsmod,
53 vfs as vfsmod,
55 )
54 )
56 from .utils import hashutil
55 from .utils import (
56 hashutil,
57 stringutil,
58 )
59
57
60
# Convenience alias so callers can release locks without importing lock.
release = lock.release

# Features that a shared repository borrows from its source repository.
sharedbookmarks = b'bookmarks'
62
65
63
66
def _local(path):
    """Return the repository module appropriate for a local ``path``.

    Yields ``bundlerepo`` when the path names a regular file (a bundle),
    and ``localrepo`` otherwise (including when the path does not exist).
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # Nonexistent or inaccessible paths fall back to localrepo handling.
        isfile = False

    return bundlerepo if isfile else localrepo
83
86
84
87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer into concrete revisions.

    ``branches`` is the ``(hashbranch, branches)`` pair produced by
    ``parseurl()``.  Returns ``(revs, checkout)`` where ``revs`` is the
    (possibly extended) revision list and ``checkout`` is the revision to
    update the working directory to, or ``None``.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # No branch specifications at all: pass the revs through untouched.
        x = revs or None
        y = revs[0] if revs else None
        return x, y

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Expand one branch name into its head revisions; returns whether
        # the name was found in the remote branchmap.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a raw revision rather than a branch name.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
127
130
128
131
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    branch = None
    if u.fragment:
        # The URL fragment names the branch to check out.
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
138
141
139
142
# Map of URL scheme -> handler (a module or callable resolving to a repo
# class); consulted by _peerlookup().
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
152
150
153
def _peerlookup(path):
    """Return the scheme handler object for ``path``.

    The handler is either the result of calling the scheme entry with the
    path (for callable entries such as ``_local``) or the entry itself.
    """
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
163
166
164
167
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # Handler without an islocal() hook: treat as non-local.
            return False
    return repo.local()
173
176
174
177
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    return url.open(ui, path, sendaccept=sendaccept)
182
185
183
186
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
186
189
187
190
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs any ``presetupfuncs``, then every loaded extension's ``reposetup``
    hook, and finally the wire-peer setup functions for non-local objects.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # Prefer the repo's own ui (may carry repo-level configuration).
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214
217
215
218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a non-local peer; the returned repo is
    filtered to the 'visible' view.
    """
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
239
242
240
243
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
247
250
248
251
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = util.url(source).path
    if not path:
        return b''
    # Last meaningful path component, with trailing slashes normalized away.
    return os.path.basename(os.path.normpath(path))
269
272
270
273
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # Not a share: sharedpath points at the repo itself.
        return None

    # Return the cached source repo when a previous call stored one.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
288
291
289
292
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """create a shared repository

    ``source`` may be a path/URL (bytes) or an existing repo/peer object.
    Returns the newly created shared repository object.
    """

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Re-open so the new repo picks up the post-share configuration.
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
340
338
341
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # Nothing to copy from the share source.
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        # Source config goes first so local settings can still override it.
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359
362
360
363
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
407
410
408
411
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # Narrow clones must carry the narrowspec into the working copy.
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
428
426
429
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # A concrete revision was requested; it overrides any checkout hint.
        checkout = update
    # Try the explicit checkout first, then fall back to standard defaults.
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
447
450
448
451
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # Publishing repos treat everything as public; skip phases.
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487
490
488
491
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
598
596
599
597 # Recomputing caches is often slow on big repos, so copy them.
600 # Recomputing caches is often slow on big repos, so copy them.
598 def _copycache(srcrepo, dstcachedir, fname):
601 def _copycache(srcrepo, dstcachedir, fname):
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
602 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 srcfname = srcrepo.cachevfs.join(fname)
603 srcfname = srcrepo.cachevfs.join(fname)
601 dstfname = os.path.join(dstcachedir, fname)
604 dstfname = os.path.join(dstcachedir, fname)
602 if os.path.exists(srcfname):
605 if os.path.exists(srcfname):
603 if not os.path.exists(dstcachedir):
606 if not os.path.exists(dstcachedir):
604 os.mkdir(dstcachedir)
607 os.mkdir(dstcachedir)
605 util.copyfile(srcfname, dstfname)
608 util.copyfile(srcfname, dstfname)
606
609
607
610
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.InputError(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.InputError(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.InputError(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabled_help(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup',
                        {
                            b'key': b'0',
                        },
                    ).result()

                if rootnode != nullid:
                    sharepath = os.path.join(sharepool, hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Apply % AFTER the gettext lookup.  Formatting
                            # first (``_(fmt % arg)``) looks up the
                            # already-expanded string in the translation
                            # catalog, which can never match; the sibling
                            # branch below already does this correctly.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
1056
1059
1057
1060
1058 def _showstats(repo, stats, quietempty=False):
1061 def _showstats(repo, stats, quietempty=False):
1059 if quietempty and stats.isempty():
1062 if quietempty and stats.isempty():
1060 return
1063 return
1061 repo.ui.status(
1064 repo.ui.status(
1062 _(
1065 _(
1063 b"%d files updated, %d files merged, "
1066 b"%d files updated, %d files merged, "
1064 b"%d files removed, %d files unresolved\n"
1067 b"%d files removed, %d files unresolved\n"
1065 )
1068 )
1066 % (
1069 % (
1067 stats.updatedcount,
1070 stats.updatedcount,
1068 stats.mergedcount,
1071 stats.mergedcount,
1069 stats.removedcount,
1072 stats.removedcount,
1070 stats.unresolvedcount,
1073 stats.unresolvedcount,
1071 )
1074 )
1072 )
1075 )
1073
1076
1074
1077
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated shim: warn, then delegate to the merge module.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1093
1096
1094
1097
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when the caller still has conflicts to resolve.
    return unresolved > 0
1102
1105
1103
1106
# naming conflict in clone(): its ``update`` parameter shadows the
# module-level update() above, so keep a private alias for internal use.
_update = update
1106
1109
1107
1110
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # A forced (clean) update must never leave unresolved files behind.
    assert stats.unresolvedcount == 0
    if not show_stats:
        return
    _showstats(repo, stats, quietempty)
1114
1117
1115
1118
# naming conflict in updatetotally(): its ``clean`` parameter shadows the
# module-level clean() above, so keep a private alias for internal use.
_clean = clean

# The set of updatecheck policies accepted by updatetotally(); anything
# else falls back to the default (or raises for explicit arguments).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1125
1128
1126
1129
1127 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1130 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1128 """Update the working directory with extra care for non-file components
1131 """Update the working directory with extra care for non-file components
1129
1132
1130 This takes care of non-file components below:
1133 This takes care of non-file components below:
1131
1134
1132 :bookmark: might be advanced or (in)activated
1135 :bookmark: might be advanced or (in)activated
1133
1136
1134 This takes arguments below:
1137 This takes arguments below:
1135
1138
1136 :checkout: to which revision the working directory is updated
1139 :checkout: to which revision the working directory is updated
1137 :brev: a name, which might be a bookmark to be activated after updating
1140 :brev: a name, which might be a bookmark to be activated after updating
1138 :clean: whether changes in the working directory can be discarded
1141 :clean: whether changes in the working directory can be discarded
1139 :updatecheck: how to deal with a dirty working directory
1142 :updatecheck: how to deal with a dirty working directory
1140
1143
1141 Valid values for updatecheck are the UPDATECHECK_* constants
1144 Valid values for updatecheck are the UPDATECHECK_* constants
1142 defined in the merge module. Passing `None` will result in using the
1145 defined in the merge module. Passing `None` will result in using the
1143 configured default.
1146 configured default.
1144
1147
1145 * ABORT: abort if the working directory is dirty
1148 * ABORT: abort if the working directory is dirty
1146 * NONE: don't check (merge working directory changes into destination)
1149 * NONE: don't check (merge working directory changes into destination)
1147 * LINEAR: check that update is linear before merging working directory
1150 * LINEAR: check that update is linear before merging working directory
1148 changes into destination
1151 changes into destination
1149 * NO_CONFLICT: check that the update does not result in file merges
1152 * NO_CONFLICT: check that the update does not result in file merges
1150
1153
1151 This returns whether conflict is detected at updating or not.
1154 This returns whether conflict is detected at updating or not.
1152 """
1155 """
1153 if updatecheck is None:
1156 if updatecheck is None:
1154 updatecheck = ui.config(b'commands', b'update.check')
1157 updatecheck = ui.config(b'commands', b'update.check')
1155 if updatecheck not in _VALID_UPDATECHECKS:
1158 if updatecheck not in _VALID_UPDATECHECKS:
1156 # If not configured, or invalid value configured
1159 # If not configured, or invalid value configured
1157 updatecheck = mergemod.UPDATECHECK_LINEAR
1160 updatecheck = mergemod.UPDATECHECK_LINEAR
1158 if updatecheck not in _VALID_UPDATECHECKS:
1161 if updatecheck not in _VALID_UPDATECHECKS:
1159 raise ValueError(
1162 raise ValueError(
1160 r'Invalid updatecheck value %r (can accept %r)'
1163 r'Invalid updatecheck value %r (can accept %r)'
1161 % (updatecheck, _VALID_UPDATECHECKS)
1164 % (updatecheck, _VALID_UPDATECHECKS)
1162 )
1165 )
1163 with repo.wlock():
1166 with repo.wlock():
1164 movemarkfrom = None
1167 movemarkfrom = None
1165 warndest = False
1168 warndest = False
1166 if checkout is None:
1169 if checkout is None:
1167 updata = destutil.destupdate(repo, clean=clean)
1170 updata = destutil.destupdate(repo, clean=clean)
1168 checkout, movemarkfrom, brev = updata
1171 checkout, movemarkfrom, brev = updata
1169 warndest = True
1172 warndest = True
1170
1173
1171 if clean:
1174 if clean:
1172 ret = _clean(repo, checkout)
1175 ret = _clean(repo, checkout)
1173 else:
1176 else:
1174 if updatecheck == mergemod.UPDATECHECK_ABORT:
1177 if updatecheck == mergemod.UPDATECHECK_ABORT:
1175 cmdutil.bailifchanged(repo, merge=False)
1178 cmdutil.bailifchanged(repo, merge=False)
1176 updatecheck = mergemod.UPDATECHECK_NONE
1179 updatecheck = mergemod.UPDATECHECK_NONE
1177 ret = _update(repo, checkout, updatecheck=updatecheck)
1180 ret = _update(repo, checkout, updatecheck=updatecheck)
1178
1181
1179 if not ret and movemarkfrom:
1182 if not ret and movemarkfrom:
1180 if movemarkfrom == repo[b'.'].node():
1183 if movemarkfrom == repo[b'.'].node():
1181 pass # no-op update
1184 pass # no-op update
1182 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1185 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1183 b = ui.label(repo._activebookmark, b'bookmarks.active')
1186 b = ui.label(repo._activebookmark, b'bookmarks.active')
1184 ui.status(_(b"updating bookmark %s\n") % b)
1187 ui.status(_(b"updating bookmark %s\n") % b)
1185 else:
1188 else:
1186 # this can happen with a non-linear update
1189 # this can happen with a non-linear update
1187 b = ui.label(repo._activebookmark, b'bookmarks')
1190 b = ui.label(repo._activebookmark, b'bookmarks')
1188 ui.status(_(b"(leaving bookmark %s)\n") % b)
1191 ui.status(_(b"(leaving bookmark %s)\n") % b)
1189 bookmarks.deactivate(repo)
1192 bookmarks.deactivate(repo)
1190 elif brev in repo._bookmarks:
1193 elif brev in repo._bookmarks:
1191 if brev != repo._activebookmark:
1194 if brev != repo._activebookmark:
1192 b = ui.label(brev, b'bookmarks.active')
1195 b = ui.label(brev, b'bookmarks.active')
1193 ui.status(_(b"(activating bookmark %s)\n") % b)
1196 ui.status(_(b"(activating bookmark %s)\n") % b)
1194 bookmarks.activate(repo, brev)
1197 bookmarks.activate(repo, brev)
1195 elif brev:
1198 elif brev:
1196 if repo._activebookmark:
1199 if repo._activebookmark:
1197 b = ui.label(repo._activebookmark, b'bookmarks')
1200 b = ui.label(repo._activebookmark, b'bookmarks')
1198 ui.status(_(b"(leaving bookmark %s)\n") % b)
1201 ui.status(_(b"(leaving bookmark %s)\n") % b)
1199 bookmarks.deactivate(repo)
1202 bookmarks.deactivate(repo)
1200
1203
1201 if warndest:
1204 if warndest:
1202 destutil.statusotherdests(ui, repo)
1205 destutil.statusotherdests(ui, repo)
1203
1206
1204 return ret
1207 return ret
1205
1208
1206
1209
1207 def merge(
1210 def merge(
1208 ctx,
1211 ctx,
1209 force=False,
1212 force=False,
1210 remind=True,
1213 remind=True,
1211 labels=None,
1214 labels=None,
1212 ):
1215 ):
1213 """Branch merge with node, resolving changes. Return true if any
1216 """Branch merge with node, resolving changes. Return true if any
1214 unresolved conflicts."""
1217 unresolved conflicts."""
1215 repo = ctx.repo()
1218 repo = ctx.repo()
1216 stats = mergemod.merge(ctx, force=force, labels=labels)
1219 stats = mergemod.merge(ctx, force=force, labels=labels)
1217 _showstats(repo, stats)
1220 _showstats(repo, stats)
1218 if stats.unresolvedcount:
1221 if stats.unresolvedcount:
1219 repo.ui.status(
1222 repo.ui.status(
1220 _(
1223 _(
1221 b"use 'hg resolve' to retry unresolved file merges "
1224 b"use 'hg resolve' to retry unresolved file merges "
1222 b"or 'hg merge --abort' to abandon\n"
1225 b"or 'hg merge --abort' to abandon\n"
1223 )
1226 )
1224 )
1227 )
1225 elif remind:
1228 elif remind:
1226 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1229 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1227 return stats.unresolvedcount > 0
1230 return stats.unresolvedcount > 0
1228
1231
1229
1232
1230 def abortmerge(ui, repo):
1233 def abortmerge(ui, repo):
1231 ms = mergestatemod.mergestate.read(repo)
1234 ms = mergestatemod.mergestate.read(repo)
1232 if ms.active():
1235 if ms.active():
1233 # there were conflicts
1236 # there were conflicts
1234 node = ms.localctx.hex()
1237 node = ms.localctx.hex()
1235 else:
1238 else:
1236 # there were no conficts, mergestate was not stored
1239 # there were no conficts, mergestate was not stored
1237 node = repo[b'.'].hex()
1240 node = repo[b'.'].hex()
1238
1241
1239 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1242 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1240 stats = mergemod.clean_update(repo[node])
1243 stats = mergemod.clean_update(repo[node])
1241 assert stats.unresolvedcount == 0
1244 assert stats.unresolvedcount == 0
1242 _showstats(repo, stats)
1245 _showstats(repo, stats)
1243
1246
1244
1247
1245 def _incoming(
1248 def _incoming(
1246 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1249 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1247 ):
1250 ):
1248 """
1251 """
1249 Helper for incoming / gincoming.
1252 Helper for incoming / gincoming.
1250 displaychlist gets called with
1253 displaychlist gets called with
1251 (remoterepo, incomingchangesetlist, displayer) parameters,
1254 (remoterepo, incomingchangesetlist, displayer) parameters,
1252 and is supposed to contain only code that can't be unified.
1255 and is supposed to contain only code that can't be unified.
1253 """
1256 """
1254 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1257 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1255 other = peer(repo, opts, source)
1258 other = peer(repo, opts, source)
1256 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1259 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1257 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1260 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1258
1261
1259 if revs:
1262 if revs:
1260 revs = [other.lookup(rev) for rev in revs]
1263 revs = [other.lookup(rev) for rev in revs]
1261 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1264 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1262 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1265 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1263 )
1266 )
1264 try:
1267 try:
1265 if not chlist:
1268 if not chlist:
1266 ui.status(_(b"no changes found\n"))
1269 ui.status(_(b"no changes found\n"))
1267 return subreporecurse()
1270 return subreporecurse()
1268 ui.pager(b'incoming')
1271 ui.pager(b'incoming')
1269 displayer = logcmdutil.changesetdisplayer(
1272 displayer = logcmdutil.changesetdisplayer(
1270 ui, other, opts, buffered=buffered
1273 ui, other, opts, buffered=buffered
1271 )
1274 )
1272 displaychlist(other, chlist, displayer)
1275 displaychlist(other, chlist, displayer)
1273 displayer.close()
1276 displayer.close()
1274 finally:
1277 finally:
1275 cleanupfn()
1278 cleanupfn()
1276 subreporecurse()
1279 subreporecurse()
1277 return 0 # exit code is zero since we found incoming changes
1280 return 0 # exit code is zero since we found incoming changes
1278
1281
1279
1282
1280 def incoming(ui, repo, source, opts):
1283 def incoming(ui, repo, source, opts):
1281 def subreporecurse():
1284 def subreporecurse():
1282 ret = 1
1285 ret = 1
1283 if opts.get(b'subrepos'):
1286 if opts.get(b'subrepos'):
1284 ctx = repo[None]
1287 ctx = repo[None]
1285 for subpath in sorted(ctx.substate):
1288 for subpath in sorted(ctx.substate):
1286 sub = ctx.sub(subpath)
1289 sub = ctx.sub(subpath)
1287 ret = min(ret, sub.incoming(ui, source, opts))
1290 ret = min(ret, sub.incoming(ui, source, opts))
1288 return ret
1291 return ret
1289
1292
1290 def display(other, chlist, displayer):
1293 def display(other, chlist, displayer):
1291 limit = logcmdutil.getlimit(opts)
1294 limit = logcmdutil.getlimit(opts)
1292 if opts.get(b'newest_first'):
1295 if opts.get(b'newest_first'):
1293 chlist.reverse()
1296 chlist.reverse()
1294 count = 0
1297 count = 0
1295 for n in chlist:
1298 for n in chlist:
1296 if limit is not None and count >= limit:
1299 if limit is not None and count >= limit:
1297 break
1300 break
1298 parents = [p for p in other.changelog.parents(n) if p != nullid]
1301 parents = [p for p in other.changelog.parents(n) if p != nullid]
1299 if opts.get(b'no_merges') and len(parents) == 2:
1302 if opts.get(b'no_merges') and len(parents) == 2:
1300 continue
1303 continue
1301 count += 1
1304 count += 1
1302 displayer.show(other[n])
1305 displayer.show(other[n])
1303
1306
1304 return _incoming(display, subreporecurse, ui, repo, source, opts)
1307 return _incoming(display, subreporecurse, ui, repo, source, opts)
1305
1308
1306
1309
1307 def _outgoing(ui, repo, dest, opts):
1310 def _outgoing(ui, repo, dest, opts):
1308 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1311 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1309 if not path:
1312 if not path:
1310 raise error.Abort(
1313 raise error.Abort(
1311 _(b'default repository not configured!'),
1314 _(b'default repository not configured!'),
1312 hint=_(b"see 'hg help config.paths'"),
1315 hint=_(b"see 'hg help config.paths'"),
1313 )
1316 )
1314 dest = path.pushloc or path.loc
1317 dest = path.pushloc or path.loc
1315 branches = path.branch, opts.get(b'branch') or []
1318 branches = path.branch, opts.get(b'branch') or []
1316
1319
1317 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1320 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1318 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1321 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1319 if revs:
1322 if revs:
1320 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1323 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1321
1324
1322 other = peer(repo, opts, dest)
1325 other = peer(repo, opts, dest)
1323 outgoing = discovery.findcommonoutgoing(
1326 outgoing = discovery.findcommonoutgoing(
1324 repo, other, revs, force=opts.get(b'force')
1327 repo, other, revs, force=opts.get(b'force')
1325 )
1328 )
1326 o = outgoing.missing
1329 o = outgoing.missing
1327 if not o:
1330 if not o:
1328 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1331 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1329 return o, other
1332 return o, other
1330
1333
1331
1334
1332 def outgoing(ui, repo, dest, opts):
1335 def outgoing(ui, repo, dest, opts):
1333 def recurse():
1336 def recurse():
1334 ret = 1
1337 ret = 1
1335 if opts.get(b'subrepos'):
1338 if opts.get(b'subrepos'):
1336 ctx = repo[None]
1339 ctx = repo[None]
1337 for subpath in sorted(ctx.substate):
1340 for subpath in sorted(ctx.substate):
1338 sub = ctx.sub(subpath)
1341 sub = ctx.sub(subpath)
1339 ret = min(ret, sub.outgoing(ui, dest, opts))
1342 ret = min(ret, sub.outgoing(ui, dest, opts))
1340 return ret
1343 return ret
1341
1344
1342 limit = logcmdutil.getlimit(opts)
1345 limit = logcmdutil.getlimit(opts)
1343 o, other = _outgoing(ui, repo, dest, opts)
1346 o, other = _outgoing(ui, repo, dest, opts)
1344 if not o:
1347 if not o:
1345 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1348 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1346 return recurse()
1349 return recurse()
1347
1350
1348 if opts.get(b'newest_first'):
1351 if opts.get(b'newest_first'):
1349 o.reverse()
1352 o.reverse()
1350 ui.pager(b'outgoing')
1353 ui.pager(b'outgoing')
1351 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1354 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1352 count = 0
1355 count = 0
1353 for n in o:
1356 for n in o:
1354 if limit is not None and count >= limit:
1357 if limit is not None and count >= limit:
1355 break
1358 break
1356 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1359 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1357 if opts.get(b'no_merges') and len(parents) == 2:
1360 if opts.get(b'no_merges') and len(parents) == 2:
1358 continue
1361 continue
1359 count += 1
1362 count += 1
1360 displayer.show(repo[n])
1363 displayer.show(repo[n])
1361 displayer.close()
1364 displayer.close()
1362 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1365 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1363 recurse()
1366 recurse()
1364 return 0 # exit code is zero since we found outgoing changes
1367 return 0 # exit code is zero since we found outgoing changes
1365
1368
1366
1369
1367 def verify(repo, level=None):
1370 def verify(repo, level=None):
1368 """verify the consistency of a repository"""
1371 """verify the consistency of a repository"""
1369 ret = verifymod.verify(repo, level=level)
1372 ret = verifymod.verify(repo, level=level)
1370
1373
1371 # Broken subrepo references in hidden csets don't seem worth worrying about,
1374 # Broken subrepo references in hidden csets don't seem worth worrying about,
1372 # since they can't be pushed/pulled, and --hidden can be used if they are a
1375 # since they can't be pushed/pulled, and --hidden can be used if they are a
1373 # concern.
1376 # concern.
1374
1377
1375 # pathto() is needed for -R case
1378 # pathto() is needed for -R case
1376 revs = repo.revs(
1379 revs = repo.revs(
1377 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1380 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1378 )
1381 )
1379
1382
1380 if revs:
1383 if revs:
1381 repo.ui.status(_(b'checking subrepo links\n'))
1384 repo.ui.status(_(b'checking subrepo links\n'))
1382 for rev in revs:
1385 for rev in revs:
1383 ctx = repo[rev]
1386 ctx = repo[rev]
1384 try:
1387 try:
1385 for subpath in ctx.substate:
1388 for subpath in ctx.substate:
1386 try:
1389 try:
1387 ret = (
1390 ret = (
1388 ctx.sub(subpath, allowcreate=False).verify() or ret
1391 ctx.sub(subpath, allowcreate=False).verify() or ret
1389 )
1392 )
1390 except error.RepoError as e:
1393 except error.RepoError as e:
1391 repo.ui.warn(b'%d: %s\n' % (rev, e))
1394 repo.ui.warn(b'%d: %s\n' % (rev, e))
1392 except Exception:
1395 except Exception:
1393 repo.ui.warn(
1396 repo.ui.warn(
1394 _(b'.hgsubstate is corrupt in revision %s\n')
1397 _(b'.hgsubstate is corrupt in revision %s\n')
1395 % short(ctx.node())
1398 % short(ctx.node())
1396 )
1399 )
1397
1400
1398 return ret
1401 return ret
1399
1402
1400
1403
1401 def remoteui(src, opts):
1404 def remoteui(src, opts):
1402 """build a remote ui from ui or repo and opts"""
1405 """build a remote ui from ui or repo and opts"""
1403 if util.safehasattr(src, b'baseui'): # looks like a repository
1406 if util.safehasattr(src, b'baseui'): # looks like a repository
1404 dst = src.baseui.copy() # drop repo-specific config
1407 dst = src.baseui.copy() # drop repo-specific config
1405 src = src.ui # copy target options from repo
1408 src = src.ui # copy target options from repo
1406 else: # assume it's a global ui object
1409 else: # assume it's a global ui object
1407 dst = src.copy() # keep all global options
1410 dst = src.copy() # keep all global options
1408
1411
1409 # copy ssh-specific options
1412 # copy ssh-specific options
1410 for o in b'ssh', b'remotecmd':
1413 for o in b'ssh', b'remotecmd':
1411 v = opts.get(o) or src.config(b'ui', o)
1414 v = opts.get(o) or src.config(b'ui', o)
1412 if v:
1415 if v:
1413 dst.setconfig(b"ui", o, v, b'copied')
1416 dst.setconfig(b"ui", o, v, b'copied')
1414
1417
1415 # copy bundle-specific options
1418 # copy bundle-specific options
1416 r = src.config(b'bundle', b'mainreporoot')
1419 r = src.config(b'bundle', b'mainreporoot')
1417 if r:
1420 if r:
1418 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1421 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1419
1422
1420 # copy selected local settings to the remote ui
1423 # copy selected local settings to the remote ui
1421 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1424 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1422 for key, val in src.configitems(sect):
1425 for key, val in src.configitems(sect):
1423 dst.setconfig(sect, key, val, b'copied')
1426 dst.setconfig(sect, key, val, b'copied')
1424 v = src.config(b'web', b'cacerts')
1427 v = src.config(b'web', b'cacerts')
1425 if v:
1428 if v:
1426 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1429 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1427
1430
1428 return dst
1431 return dst
1429
1432
1430
1433
1431 # Files of interest
1434 # Files of interest
1432 # Used to check if the repository has changed looking at mtime and size of
1435 # Used to check if the repository has changed looking at mtime and size of
1433 # these files.
1436 # these files.
1434 foi = [
1437 foi = [
1435 (b'spath', b'00changelog.i'),
1438 (b'spath', b'00changelog.i'),
1436 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1439 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1437 (b'spath', b'obsstore'),
1440 (b'spath', b'obsstore'),
1438 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1441 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1439 ]
1442 ]
1440
1443
1441
1444
1442 class cachedlocalrepo(object):
1445 class cachedlocalrepo(object):
1443 """Holds a localrepository that can be cached and reused."""
1446 """Holds a localrepository that can be cached and reused."""
1444
1447
1445 def __init__(self, repo):
1448 def __init__(self, repo):
1446 """Create a new cached repo from an existing repo.
1449 """Create a new cached repo from an existing repo.
1447
1450
1448 We assume the passed in repo was recently created. If the
1451 We assume the passed in repo was recently created. If the
1449 repo has changed between when it was created and when it was
1452 repo has changed between when it was created and when it was
1450 turned into a cache, it may not refresh properly.
1453 turned into a cache, it may not refresh properly.
1451 """
1454 """
1452 assert isinstance(repo, localrepo.localrepository)
1455 assert isinstance(repo, localrepo.localrepository)
1453 self._repo = repo
1456 self._repo = repo
1454 self._state, self.mtime = self._repostate()
1457 self._state, self.mtime = self._repostate()
1455 self._filtername = repo.filtername
1458 self._filtername = repo.filtername
1456
1459
1457 def fetch(self):
1460 def fetch(self):
1458 """Refresh (if necessary) and return a repository.
1461 """Refresh (if necessary) and return a repository.
1459
1462
1460 If the cached instance is out of date, it will be recreated
1463 If the cached instance is out of date, it will be recreated
1461 automatically and returned.
1464 automatically and returned.
1462
1465
1463 Returns a tuple of the repo and a boolean indicating whether a new
1466 Returns a tuple of the repo and a boolean indicating whether a new
1464 repo instance was created.
1467 repo instance was created.
1465 """
1468 """
1466 # We compare the mtimes and sizes of some well-known files to
1469 # We compare the mtimes and sizes of some well-known files to
1467 # determine if the repo changed. This is not precise, as mtimes
1470 # determine if the repo changed. This is not precise, as mtimes
1468 # are susceptible to clock skew and imprecise filesystems and
1471 # are susceptible to clock skew and imprecise filesystems and
1469 # file content can change while maintaining the same size.
1472 # file content can change while maintaining the same size.
1470
1473
1471 state, mtime = self._repostate()
1474 state, mtime = self._repostate()
1472 if state == self._state:
1475 if state == self._state:
1473 return self._repo, False
1476 return self._repo, False
1474
1477
1475 repo = repository(self._repo.baseui, self._repo.url())
1478 repo = repository(self._repo.baseui, self._repo.url())
1476 if self._filtername:
1479 if self._filtername:
1477 self._repo = repo.filtered(self._filtername)
1480 self._repo = repo.filtered(self._filtername)
1478 else:
1481 else:
1479 self._repo = repo.unfiltered()
1482 self._repo = repo.unfiltered()
1480 self._state = state
1483 self._state = state
1481 self.mtime = mtime
1484 self.mtime = mtime
1482
1485
1483 return self._repo, True
1486 return self._repo, True
1484
1487
1485 def _repostate(self):
1488 def _repostate(self):
1486 state = []
1489 state = []
1487 maxmtime = -1
1490 maxmtime = -1
1488 for attr, fname in foi:
1491 for attr, fname in foi:
1489 prefix = getattr(self._repo, attr)
1492 prefix = getattr(self._repo, attr)
1490 p = os.path.join(prefix, fname)
1493 p = os.path.join(prefix, fname)
1491 try:
1494 try:
1492 st = os.stat(p)
1495 st = os.stat(p)
1493 except OSError:
1496 except OSError:
1494 st = os.stat(prefix)
1497 st = os.stat(prefix)
1495 state.append((st[stat.ST_MTIME], st.st_size))
1498 state.append((st[stat.ST_MTIME], st.st_size))
1496 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1499 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1497
1500
1498 return tuple(state), maxmtime
1501 return tuple(state), maxmtime
1499
1502
1500 def copy(self):
1503 def copy(self):
1501 """Obtain a copy of this class instance.
1504 """Obtain a copy of this class instance.
1502
1505
1503 A new localrepository instance is obtained. The new instance should be
1506 A new localrepository instance is obtained. The new instance should be
1504 completely independent of the original.
1507 completely independent of the original.
1505 """
1508 """
1506 repo = repository(self._repo.baseui, self._repo.origroot)
1509 repo = repository(self._repo.baseui, self._repo.origroot)
1507 if self._filtername:
1510 if self._filtername:
1508 repo = repo.filtered(self._filtername)
1511 repo = repo.filtered(self._filtername)
1509 else:
1512 else:
1510 repo = repo.unfiltered()
1513 repo = repo.unfiltered()
1511 c = cachedlocalrepo(repo)
1514 c = cachedlocalrepo(repo)
1512 c._state = self._state
1515 c._state = self._state
1513 c.mtime = self.mtime
1516 c.mtime = self.mtime
1514 return c
1517 return c
General Comments 0
You need to be logged in to leave comments. Login now