##// END OF EJS Templates
unshare: use context manager for locks...
Martin von Zweigbergk -
r41436:bc843e25 default
parent child Browse files
Show More
@@ -1,1230 +1,1225 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 nullid,
19 nullid,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 bookmarks,
23 bookmarks,
24 bundlerepo,
24 bundlerepo,
25 cacheutil,
25 cacheutil,
26 cmdutil,
26 cmdutil,
27 destutil,
27 destutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 extensions,
31 extensions,
32 httppeer,
32 httppeer,
33 localrepo,
33 localrepo,
34 lock,
34 lock,
35 logcmdutil,
35 logcmdutil,
36 logexchange,
36 logexchange,
37 merge as mergemod,
37 merge as mergemod,
38 narrowspec,
38 narrowspec,
39 node,
39 node,
40 phases,
40 phases,
41 repository as repositorymod,
41 repository as repositorymod,
42 scmutil,
42 scmutil,
43 sshpeer,
43 sshpeer,
44 statichttprepo,
44 statichttprepo,
45 ui as uimod,
45 ui as uimod,
46 unionrepo,
46 unionrepo,
47 url,
47 url,
48 util,
48 util,
49 verify as verifymod,
49 verify as verifymod,
50 vfs as vfsmod,
50 vfs as vfsmod,
51 )
51 )
52
52
53 release = lock.release
53 release = lock.release
54
54
55 # shared features
55 # shared features
56 sharedbookmarks = 'bookmarks'
56 sharedbookmarks = 'bookmarks'
57
57
def _local(path):
    """Return the repo module to use for a local path.

    A path pointing at an existing file is treated as a bundle;
    anything else is handled by localrepo.
    """
    path = util.expandpath(util.urllocalpath(path))
    # a plain file is an overlay bundle, not a repository directory
    return bundlerepo if os.path.isfile(path) else localrepo
61
61
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as returned by
    ``parseurl()``.  Returns ``(revs, checkout)`` where ``revs`` is the
    (possibly extended) list of revisions and ``checkout`` is the
    revision to update to, or ``None``.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested; pass revs through untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # old servers: fall back to treating the branch name as a rev
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # resolve one branch name; extend revs with its heads if known
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be a branch name or a raw revision hash
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
104
104
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        # the URL fragment names a branch; strip it from the URL itself
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
114
114
# map a URL scheme to the module (or factory) that instantiates peers/repos
# for that scheme; 'file' dispatches on whether the path is a bundle file
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
124
124
def _peerlookup(path):
    """Return the module/object handling the scheme of ``path``.

    Unknown schemes fall back to the 'file' handler.
    """
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
137
137
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # handler has no islocal() -> scheme is inherently remote
            return False
    return repo.local()
146
146
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)
154
154
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
157
157
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # prefer the repo's own ui (it may carry repo-level config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b'  > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own setup pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
181
181
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        # callers of repository() require a local repo, not a remote peer
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')
192
192
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()
198
198
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    # normpath strips any trailing slash so basename is the last component
    return os.path.basename(os.path.normpath(path))
219
219
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached from an earlier call on the same repo object
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
237
237
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: resolve it to a repo object
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the new shared requirements take effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
273
273
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore() may return None when no store lock was taken
        with destlock or util.nullcontextmanager():

            sharefile = repo.vfs.join('sharedpath')
            util.rename(sharefile, sharefile + '.old')

            repo.requirements.discard('shared')
            repo.requirements.discard('relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    #       removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
320
315
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones keep a working-copy copy of the narrowspec
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
338
333
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
359
354
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    # phase data is meaningless in a publishing source
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise
397
392
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
468
463
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)
479
474
480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
475 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
481 update=True, stream=False, branch=None, shareopts=None,
476 update=True, stream=False, branch=None, shareopts=None,
482 storeincludepats=None, storeexcludepats=None, depth=None):
477 storeincludepats=None, storeexcludepats=None, depth=None):
483 """Make a copy of an existing repository.
478 """Make a copy of an existing repository.
484
479
485 Create a copy of an existing repository in a new directory. The
480 Create a copy of an existing repository in a new directory. The
486 source and destination are URLs, as passed to the repository
481 source and destination are URLs, as passed to the repository
487 function. Returns a pair of repository peers, the source and
482 function. Returns a pair of repository peers, the source and
488 newly created destination.
483 newly created destination.
489
484
490 The location of the source is added to the new repository's
485 The location of the source is added to the new repository's
491 .hg/hgrc file, as the default to be used for future pulls and
486 .hg/hgrc file, as the default to be used for future pulls and
492 pushes.
487 pushes.
493
488
494 If an exception is raised, the partly cloned/updated destination
489 If an exception is raised, the partly cloned/updated destination
495 repository will be deleted.
490 repository will be deleted.
496
491
497 Arguments:
492 Arguments:
498
493
499 source: repository object or URL
494 source: repository object or URL
500
495
501 dest: URL of destination repository to create (defaults to base
496 dest: URL of destination repository to create (defaults to base
502 name of source repository)
497 name of source repository)
503
498
504 pull: always pull from source repository, even in local case or if the
499 pull: always pull from source repository, even in local case or if the
505 server prefers streaming
500 server prefers streaming
506
501
507 stream: stream raw data uncompressed from repository (fast over
502 stream: stream raw data uncompressed from repository (fast over
508 LAN, slow over WAN)
503 LAN, slow over WAN)
509
504
510 revs: revision to clone up to (implies pull=True)
505 revs: revision to clone up to (implies pull=True)
511
506
512 update: update working directory after clone completes, if
507 update: update working directory after clone completes, if
513 destination is local repository (True means update to default rev,
508 destination is local repository (True means update to default rev,
514 anything else is treated as a revision)
509 anything else is treated as a revision)
515
510
516 branch: branches to clone
511 branch: branches to clone
517
512
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
513 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 activates auto sharing mode and defines the directory for stores. The
514 activates auto sharing mode and defines the directory for stores. The
520 "mode" key determines how to construct the directory name of the shared
515 "mode" key determines how to construct the directory name of the shared
521 repository. "identity" means the name is derived from the node of the first
516 repository. "identity" means the name is derived from the node of the first
522 changeset in the repository. "remote" means the name is derived from the
517 changeset in the repository. "remote" means the name is derived from the
523 remote's path/URL. Defaults to "identity."
518 remote's path/URL. Defaults to "identity."
524
519
525 storeincludepats and storeexcludepats: sets of file patterns to include and
520 storeincludepats and storeexcludepats: sets of file patterns to include and
526 exclude in the repository copy, respectively. If not defined, all files
521 exclude in the repository copy, respectively. If not defined, all files
527 will be included (a "full" clone). Otherwise a "narrow" clone containing
522 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 only the requested files will be performed. If ``storeincludepats`` is not
523 only the requested files will be performed. If ``storeincludepats`` is not
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
524 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 ``path:.``. If both are empty sets, no files will be cloned.
525 ``path:.``. If both are empty sets, no files will be cloned.
531 """
526 """
532
527
533 if isinstance(source, bytes):
528 if isinstance(source, bytes):
534 origsource = ui.expandpath(source)
529 origsource = ui.expandpath(source)
535 source, branches = parseurl(origsource, branch)
530 source, branches = parseurl(origsource, branch)
536 srcpeer = peer(ui, peeropts, source)
531 srcpeer = peer(ui, peeropts, source)
537 else:
532 else:
538 srcpeer = source.peer() # in case we were called with a localrepo
533 srcpeer = source.peer() # in case we were called with a localrepo
539 branches = (None, branch or [])
534 branches = (None, branch or [])
540 origsource = source = srcpeer.url()
535 origsource = source = srcpeer.url()
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
536 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542
537
543 if dest is None:
538 if dest is None:
544 dest = defaultdest(source)
539 dest = defaultdest(source)
545 if dest:
540 if dest:
546 ui.status(_("destination directory: %s\n") % dest)
541 ui.status(_("destination directory: %s\n") % dest)
547 else:
542 else:
548 dest = ui.expandpath(dest)
543 dest = ui.expandpath(dest)
549
544
550 dest = util.urllocalpath(dest)
545 dest = util.urllocalpath(dest)
551 source = util.urllocalpath(source)
546 source = util.urllocalpath(source)
552
547
553 if not dest:
548 if not dest:
554 raise error.Abort(_("empty destination path is not valid"))
549 raise error.Abort(_("empty destination path is not valid"))
555
550
556 destvfs = vfsmod.vfs(dest, expandpath=True)
551 destvfs = vfsmod.vfs(dest, expandpath=True)
557 if destvfs.lexists():
552 if destvfs.lexists():
558 if not destvfs.isdir():
553 if not destvfs.isdir():
559 raise error.Abort(_("destination '%s' already exists") % dest)
554 raise error.Abort(_("destination '%s' already exists") % dest)
560 elif destvfs.listdir():
555 elif destvfs.listdir():
561 raise error.Abort(_("destination '%s' is not empty") % dest)
556 raise error.Abort(_("destination '%s' is not empty") % dest)
562
557
563 createopts = {}
558 createopts = {}
564 narrow = False
559 narrow = False
565
560
566 if storeincludepats is not None:
561 if storeincludepats is not None:
567 narrowspec.validatepatterns(storeincludepats)
562 narrowspec.validatepatterns(storeincludepats)
568 narrow = True
563 narrow = True
569
564
570 if storeexcludepats is not None:
565 if storeexcludepats is not None:
571 narrowspec.validatepatterns(storeexcludepats)
566 narrowspec.validatepatterns(storeexcludepats)
572 narrow = True
567 narrow = True
573
568
574 if narrow:
569 if narrow:
575 # Include everything by default if only exclusion patterns defined.
570 # Include everything by default if only exclusion patterns defined.
576 if storeexcludepats and not storeincludepats:
571 if storeexcludepats and not storeincludepats:
577 storeincludepats = {'path:.'}
572 storeincludepats = {'path:.'}
578
573
579 createopts['narrowfiles'] = True
574 createopts['narrowfiles'] = True
580
575
581 if depth:
576 if depth:
582 createopts['shallowfilestore'] = True
577 createopts['shallowfilestore'] = True
583
578
584 if srcpeer.capable(b'lfs-serve'):
579 if srcpeer.capable(b'lfs-serve'):
585 # Repository creation honors the config if it disabled the extension, so
580 # Repository creation honors the config if it disabled the extension, so
586 # we can't just announce that lfs will be enabled. This check avoids
581 # we can't just announce that lfs will be enabled. This check avoids
587 # saying that lfs will be enabled, and then saying it's an unknown
582 # saying that lfs will be enabled, and then saying it's an unknown
588 # feature. The lfs creation option is set in either case so that a
583 # feature. The lfs creation option is set in either case so that a
589 # requirement is added. If the extension is explicitly disabled but the
584 # requirement is added. If the extension is explicitly disabled but the
590 # requirement is set, the clone aborts early, before transferring any
585 # requirement is set, the clone aborts early, before transferring any
591 # data.
586 # data.
592 createopts['lfs'] = True
587 createopts['lfs'] = True
593
588
594 if extensions.disabledext('lfs'):
589 if extensions.disabledext('lfs'):
595 ui.status(_('(remote is using large file support (lfs), but it is '
590 ui.status(_('(remote is using large file support (lfs), but it is '
596 'explicitly disabled in the local configuration)\n'))
591 'explicitly disabled in the local configuration)\n'))
597 else:
592 else:
598 ui.status(_('(remote is using large file support (lfs); lfs will '
593 ui.status(_('(remote is using large file support (lfs); lfs will '
599 'be enabled for this repository)\n'))
594 'be enabled for this repository)\n'))
600
595
601 shareopts = shareopts or {}
596 shareopts = shareopts or {}
602 sharepool = shareopts.get('pool')
597 sharepool = shareopts.get('pool')
603 sharenamemode = shareopts.get('mode')
598 sharenamemode = shareopts.get('mode')
604 if sharepool and islocal(dest):
599 if sharepool and islocal(dest):
605 sharepath = None
600 sharepath = None
606 if sharenamemode == 'identity':
601 if sharenamemode == 'identity':
607 # Resolve the name from the initial changeset in the remote
602 # Resolve the name from the initial changeset in the remote
608 # repository. This returns nullid when the remote is empty. It
603 # repository. This returns nullid when the remote is empty. It
609 # raises RepoLookupError if revision 0 is filtered or otherwise
604 # raises RepoLookupError if revision 0 is filtered or otherwise
610 # not available. If we fail to resolve, sharing is not enabled.
605 # not available. If we fail to resolve, sharing is not enabled.
611 try:
606 try:
612 with srcpeer.commandexecutor() as e:
607 with srcpeer.commandexecutor() as e:
613 rootnode = e.callcommand('lookup', {
608 rootnode = e.callcommand('lookup', {
614 'key': '0',
609 'key': '0',
615 }).result()
610 }).result()
616
611
617 if rootnode != node.nullid:
612 if rootnode != node.nullid:
618 sharepath = os.path.join(sharepool, node.hex(rootnode))
613 sharepath = os.path.join(sharepool, node.hex(rootnode))
619 else:
614 else:
620 ui.status(_('(not using pooled storage: '
615 ui.status(_('(not using pooled storage: '
621 'remote appears to be empty)\n'))
616 'remote appears to be empty)\n'))
622 except error.RepoLookupError:
617 except error.RepoLookupError:
623 ui.status(_('(not using pooled storage: '
618 ui.status(_('(not using pooled storage: '
624 'unable to resolve identity of remote)\n'))
619 'unable to resolve identity of remote)\n'))
625 elif sharenamemode == 'remote':
620 elif sharenamemode == 'remote':
626 sharepath = os.path.join(
621 sharepath = os.path.join(
627 sharepool, node.hex(hashlib.sha1(source).digest()))
622 sharepool, node.hex(hashlib.sha1(source).digest()))
628 else:
623 else:
629 raise error.Abort(_('unknown share naming mode: %s') %
624 raise error.Abort(_('unknown share naming mode: %s') %
630 sharenamemode)
625 sharenamemode)
631
626
632 # TODO this is a somewhat arbitrary restriction.
627 # TODO this is a somewhat arbitrary restriction.
633 if narrow:
628 if narrow:
634 ui.status(_('(pooled storage not supported for narrow clones)\n'))
629 ui.status(_('(pooled storage not supported for narrow clones)\n'))
635 sharepath = None
630 sharepath = None
636
631
637 if sharepath:
632 if sharepath:
638 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
633 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
639 dest, pull=pull, rev=revs, update=update,
634 dest, pull=pull, rev=revs, update=update,
640 stream=stream)
635 stream=stream)
641
636
642 srclock = destlock = cleandir = None
637 srclock = destlock = cleandir = None
643 srcrepo = srcpeer.local()
638 srcrepo = srcpeer.local()
644 try:
639 try:
645 abspath = origsource
640 abspath = origsource
646 if islocal(origsource):
641 if islocal(origsource):
647 abspath = os.path.abspath(util.urllocalpath(origsource))
642 abspath = os.path.abspath(util.urllocalpath(origsource))
648
643
649 if islocal(dest):
644 if islocal(dest):
650 cleandir = dest
645 cleandir = dest
651
646
652 copy = False
647 copy = False
653 if (srcrepo and srcrepo.cancopy() and islocal(dest)
648 if (srcrepo and srcrepo.cancopy() and islocal(dest)
654 and not phases.hassecret(srcrepo)):
649 and not phases.hassecret(srcrepo)):
655 copy = not pull and not revs
650 copy = not pull and not revs
656
651
657 # TODO this is a somewhat arbitrary restriction.
652 # TODO this is a somewhat arbitrary restriction.
658 if narrow:
653 if narrow:
659 copy = False
654 copy = False
660
655
661 if copy:
656 if copy:
662 try:
657 try:
663 # we use a lock here because if we race with commit, we
658 # we use a lock here because if we race with commit, we
664 # can end up with extra data in the cloned revlogs that's
659 # can end up with extra data in the cloned revlogs that's
665 # not pointed to by changesets, thus causing verify to
660 # not pointed to by changesets, thus causing verify to
666 # fail
661 # fail
667 srclock = srcrepo.lock(wait=False)
662 srclock = srcrepo.lock(wait=False)
668 except error.LockError:
663 except error.LockError:
669 copy = False
664 copy = False
670
665
671 if copy:
666 if copy:
672 srcrepo.hook('preoutgoing', throw=True, source='clone')
667 srcrepo.hook('preoutgoing', throw=True, source='clone')
673 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
668 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
674 if not os.path.exists(dest):
669 if not os.path.exists(dest):
675 util.makedirs(dest)
670 util.makedirs(dest)
676 else:
671 else:
677 # only clean up directories we create ourselves
672 # only clean up directories we create ourselves
678 cleandir = hgdir
673 cleandir = hgdir
679 try:
674 try:
680 destpath = hgdir
675 destpath = hgdir
681 util.makedir(destpath, notindexed=True)
676 util.makedir(destpath, notindexed=True)
682 except OSError as inst:
677 except OSError as inst:
683 if inst.errno == errno.EEXIST:
678 if inst.errno == errno.EEXIST:
684 cleandir = None
679 cleandir = None
685 raise error.Abort(_("destination '%s' already exists")
680 raise error.Abort(_("destination '%s' already exists")
686 % dest)
681 % dest)
687 raise
682 raise
688
683
689 destlock = copystore(ui, srcrepo, destpath)
684 destlock = copystore(ui, srcrepo, destpath)
690 # copy bookmarks over
685 # copy bookmarks over
691 srcbookmarks = srcrepo.vfs.join('bookmarks')
686 srcbookmarks = srcrepo.vfs.join('bookmarks')
692 dstbookmarks = os.path.join(destpath, 'bookmarks')
687 dstbookmarks = os.path.join(destpath, 'bookmarks')
693 if os.path.exists(srcbookmarks):
688 if os.path.exists(srcbookmarks):
694 util.copyfile(srcbookmarks, dstbookmarks)
689 util.copyfile(srcbookmarks, dstbookmarks)
695
690
696 dstcachedir = os.path.join(destpath, 'cache')
691 dstcachedir = os.path.join(destpath, 'cache')
697 for cache in cacheutil.cachetocopy(srcrepo):
692 for cache in cacheutil.cachetocopy(srcrepo):
698 _copycache(srcrepo, dstcachedir, cache)
693 _copycache(srcrepo, dstcachedir, cache)
699
694
700 # we need to re-init the repo after manually copying the data
695 # we need to re-init the repo after manually copying the data
701 # into it
696 # into it
702 destpeer = peer(srcrepo, peeropts, dest)
697 destpeer = peer(srcrepo, peeropts, dest)
703 srcrepo.hook('outgoing', source='clone',
698 srcrepo.hook('outgoing', source='clone',
704 node=node.hex(node.nullid))
699 node=node.hex(node.nullid))
705 else:
700 else:
706 try:
701 try:
707 # only pass ui when no srcrepo
702 # only pass ui when no srcrepo
708 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
703 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
709 createopts=createopts)
704 createopts=createopts)
710 except OSError as inst:
705 except OSError as inst:
711 if inst.errno == errno.EEXIST:
706 if inst.errno == errno.EEXIST:
712 cleandir = None
707 cleandir = None
713 raise error.Abort(_("destination '%s' already exists")
708 raise error.Abort(_("destination '%s' already exists")
714 % dest)
709 % dest)
715 raise
710 raise
716
711
717 if revs:
712 if revs:
718 if not srcpeer.capable('lookup'):
713 if not srcpeer.capable('lookup'):
719 raise error.Abort(_("src repository does not support "
714 raise error.Abort(_("src repository does not support "
720 "revision lookup and so doesn't "
715 "revision lookup and so doesn't "
721 "support clone by revision"))
716 "support clone by revision"))
722
717
723 # TODO this is batchable.
718 # TODO this is batchable.
724 remoterevs = []
719 remoterevs = []
725 for rev in revs:
720 for rev in revs:
726 with srcpeer.commandexecutor() as e:
721 with srcpeer.commandexecutor() as e:
727 remoterevs.append(e.callcommand('lookup', {
722 remoterevs.append(e.callcommand('lookup', {
728 'key': rev,
723 'key': rev,
729 }).result())
724 }).result())
730 revs = remoterevs
725 revs = remoterevs
731
726
732 checkout = revs[0]
727 checkout = revs[0]
733 else:
728 else:
734 revs = None
729 revs = None
735 local = destpeer.local()
730 local = destpeer.local()
736 if local:
731 if local:
737 if narrow:
732 if narrow:
738 with local.wlock(), local.lock():
733 with local.wlock(), local.lock():
739 local.setnarrowpats(storeincludepats, storeexcludepats)
734 local.setnarrowpats(storeincludepats, storeexcludepats)
740 narrowspec.copytoworkingcopy(local)
735 narrowspec.copytoworkingcopy(local)
741
736
742 u = util.url(abspath)
737 u = util.url(abspath)
743 defaulturl = bytes(u)
738 defaulturl = bytes(u)
744 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
739 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
745 if not stream:
740 if not stream:
746 if pull:
741 if pull:
747 stream = False
742 stream = False
748 else:
743 else:
749 stream = None
744 stream = None
750 # internal config: ui.quietbookmarkmove
745 # internal config: ui.quietbookmarkmove
751 overrides = {('ui', 'quietbookmarkmove'): True}
746 overrides = {('ui', 'quietbookmarkmove'): True}
752 with local.ui.configoverride(overrides, 'clone'):
747 with local.ui.configoverride(overrides, 'clone'):
753 exchange.pull(local, srcpeer, revs,
748 exchange.pull(local, srcpeer, revs,
754 streamclonerequested=stream,
749 streamclonerequested=stream,
755 includepats=storeincludepats,
750 includepats=storeincludepats,
756 excludepats=storeexcludepats,
751 excludepats=storeexcludepats,
757 depth=depth)
752 depth=depth)
758 elif srcrepo:
753 elif srcrepo:
759 # TODO lift restriction once exchange.push() accepts narrow
754 # TODO lift restriction once exchange.push() accepts narrow
760 # push.
755 # push.
761 if narrow:
756 if narrow:
762 raise error.Abort(_('narrow clone not available for '
757 raise error.Abort(_('narrow clone not available for '
763 'remote destinations'))
758 'remote destinations'))
764
759
765 exchange.push(srcrepo, destpeer, revs=revs,
760 exchange.push(srcrepo, destpeer, revs=revs,
766 bookmarks=srcrepo._bookmarks.keys())
761 bookmarks=srcrepo._bookmarks.keys())
767 else:
762 else:
768 raise error.Abort(_("clone from remote to remote not supported")
763 raise error.Abort(_("clone from remote to remote not supported")
769 )
764 )
770
765
771 cleandir = None
766 cleandir = None
772
767
773 destrepo = destpeer.local()
768 destrepo = destpeer.local()
774 if destrepo:
769 if destrepo:
775 template = uimod.samplehgrcs['cloned']
770 template = uimod.samplehgrcs['cloned']
776 u = util.url(abspath)
771 u = util.url(abspath)
777 u.passwd = None
772 u.passwd = None
778 defaulturl = bytes(u)
773 defaulturl = bytes(u)
779 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
774 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
780 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
775 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
781
776
782 if ui.configbool('experimental', 'remotenames'):
777 if ui.configbool('experimental', 'remotenames'):
783 logexchange.pullremotenames(destrepo, srcpeer)
778 logexchange.pullremotenames(destrepo, srcpeer)
784
779
785 if update:
780 if update:
786 if update is not True:
781 if update is not True:
787 with srcpeer.commandexecutor() as e:
782 with srcpeer.commandexecutor() as e:
788 checkout = e.callcommand('lookup', {
783 checkout = e.callcommand('lookup', {
789 'key': update,
784 'key': update,
790 }).result()
785 }).result()
791
786
792 uprev = None
787 uprev = None
793 status = None
788 status = None
794 if checkout is not None:
789 if checkout is not None:
795 # Some extensions (at least hg-git and hg-subversion) have
790 # Some extensions (at least hg-git and hg-subversion) have
796 # a peer.lookup() implementation that returns a name instead
791 # a peer.lookup() implementation that returns a name instead
797 # of a nodeid. We work around it here until we've figured
792 # of a nodeid. We work around it here until we've figured
798 # out a better solution.
793 # out a better solution.
799 if len(checkout) == 20 and checkout in destrepo:
794 if len(checkout) == 20 and checkout in destrepo:
800 uprev = checkout
795 uprev = checkout
801 elif scmutil.isrevsymbol(destrepo, checkout):
796 elif scmutil.isrevsymbol(destrepo, checkout):
802 uprev = scmutil.revsymbol(destrepo, checkout).node()
797 uprev = scmutil.revsymbol(destrepo, checkout).node()
803 else:
798 else:
804 if update is not True:
799 if update is not True:
805 try:
800 try:
806 uprev = destrepo.lookup(update)
801 uprev = destrepo.lookup(update)
807 except error.RepoLookupError:
802 except error.RepoLookupError:
808 pass
803 pass
809 if uprev is None:
804 if uprev is None:
810 try:
805 try:
811 uprev = destrepo._bookmarks['@']
806 uprev = destrepo._bookmarks['@']
812 update = '@'
807 update = '@'
813 bn = destrepo[uprev].branch()
808 bn = destrepo[uprev].branch()
814 if bn == 'default':
809 if bn == 'default':
815 status = _("updating to bookmark @\n")
810 status = _("updating to bookmark @\n")
816 else:
811 else:
817 status = (_("updating to bookmark @ on branch %s\n")
812 status = (_("updating to bookmark @ on branch %s\n")
818 % bn)
813 % bn)
819 except KeyError:
814 except KeyError:
820 try:
815 try:
821 uprev = destrepo.branchtip('default')
816 uprev = destrepo.branchtip('default')
822 except error.RepoLookupError:
817 except error.RepoLookupError:
823 uprev = destrepo.lookup('tip')
818 uprev = destrepo.lookup('tip')
824 if not status:
819 if not status:
825 bn = destrepo[uprev].branch()
820 bn = destrepo[uprev].branch()
826 status = _("updating to branch %s\n") % bn
821 status = _("updating to branch %s\n") % bn
827 destrepo.ui.status(status)
822 destrepo.ui.status(status)
828 _update(destrepo, uprev)
823 _update(destrepo, uprev)
829 if update in destrepo._bookmarks:
824 if update in destrepo._bookmarks:
830 bookmarks.activate(destrepo, update)
825 bookmarks.activate(destrepo, update)
831 finally:
826 finally:
832 release(srclock, destlock)
827 release(srclock, destlock)
833 if cleandir is not None:
828 if cleandir is not None:
834 shutil.rmtree(cleandir, True)
829 shutil.rmtree(cleandir, True)
835 if srcpeer is not None:
830 if srcpeer is not None:
836 srcpeer.close()
831 srcpeer.close()
837 return srcpeer, destpeer
832 return srcpeer, destpeer
838
833
839 def _showstats(repo, stats, quietempty=False):
834 def _showstats(repo, stats, quietempty=False):
840 if quietempty and stats.isempty():
835 if quietempty and stats.isempty():
841 return
836 return
842 repo.ui.status(_("%d files updated, %d files merged, "
837 repo.ui.status(_("%d files updated, %d files merged, "
843 "%d files removed, %d files unresolved\n") % (
838 "%d files removed, %d files unresolved\n") % (
844 stats.updatedcount, stats.mergedcount,
839 stats.updatedcount, stats.mergedcount,
845 stats.removedcount, stats.unresolvedcount))
840 stats.removedcount, stats.unresolvedcount))
846
841
847 def updaterepo(repo, node, overwrite, updatecheck=None):
842 def updaterepo(repo, node, overwrite, updatecheck=None):
848 """Update the working directory to node.
843 """Update the working directory to node.
849
844
850 When overwrite is set, changes are clobbered, merged else
845 When overwrite is set, changes are clobbered, merged else
851
846
852 returns stats (see pydoc mercurial.merge.applyupdates)"""
847 returns stats (see pydoc mercurial.merge.applyupdates)"""
853 return mergemod.update(repo, node, branchmerge=False, force=overwrite,
848 return mergemod.update(repo, node, branchmerge=False, force=overwrite,
854 labels=['working copy', 'destination'],
849 labels=['working copy', 'destination'],
855 updatecheck=updatecheck)
850 updatecheck=updatecheck)
856
851
857 def update(repo, node, quietempty=False, updatecheck=None):
852 def update(repo, node, quietempty=False, updatecheck=None):
858 """update the working directory to node"""
853 """update the working directory to node"""
859 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
854 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
860 _showstats(repo, stats, quietempty)
855 _showstats(repo, stats, quietempty)
861 if stats.unresolvedcount:
856 if stats.unresolvedcount:
862 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
863 return stats.unresolvedcount > 0
858 return stats.unresolvedcount > 0
864
859
865 # naming conflict in clone()
860 # naming conflict in clone()
866 _update = update
861 _update = update
867
862
868 def clean(repo, node, show_stats=True, quietempty=False):
863 def clean(repo, node, show_stats=True, quietempty=False):
869 """forcibly switch the working directory to node, clobbering changes"""
864 """forcibly switch the working directory to node, clobbering changes"""
870 stats = updaterepo(repo, node, True)
865 stats = updaterepo(repo, node, True)
871 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
866 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
872 if show_stats:
867 if show_stats:
873 _showstats(repo, stats, quietempty)
868 _showstats(repo, stats, quietempty)
874 return stats.unresolvedcount > 0
869 return stats.unresolvedcount > 0
875
870
876 # naming conflict in updatetotally()
871 # naming conflict in updatetotally()
877 _clean = clean
872 _clean = clean
878
873
879 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
874 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
880 """Update the working directory with extra care for non-file components
875 """Update the working directory with extra care for non-file components
881
876
882 This takes care of non-file components below:
877 This takes care of non-file components below:
883
878
884 :bookmark: might be advanced or (in)activated
879 :bookmark: might be advanced or (in)activated
885
880
886 This takes arguments below:
881 This takes arguments below:
887
882
888 :checkout: to which revision the working directory is updated
883 :checkout: to which revision the working directory is updated
889 :brev: a name, which might be a bookmark to be activated after updating
884 :brev: a name, which might be a bookmark to be activated after updating
890 :clean: whether changes in the working directory can be discarded
885 :clean: whether changes in the working directory can be discarded
891 :updatecheck: how to deal with a dirty working directory
886 :updatecheck: how to deal with a dirty working directory
892
887
893 Valid values for updatecheck are (None => linear):
888 Valid values for updatecheck are (None => linear):
894
889
895 * abort: abort if the working directory is dirty
890 * abort: abort if the working directory is dirty
896 * none: don't check (merge working directory changes into destination)
891 * none: don't check (merge working directory changes into destination)
897 * linear: check that update is linear before merging working directory
892 * linear: check that update is linear before merging working directory
898 changes into destination
893 changes into destination
899 * noconflict: check that the update does not result in file merges
894 * noconflict: check that the update does not result in file merges
900
895
901 This returns whether conflict is detected at updating or not.
896 This returns whether conflict is detected at updating or not.
902 """
897 """
903 if updatecheck is None:
898 if updatecheck is None:
904 updatecheck = ui.config('commands', 'update.check')
899 updatecheck = ui.config('commands', 'update.check')
905 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
900 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
906 # If not configured, or invalid value configured
901 # If not configured, or invalid value configured
907 updatecheck = 'linear'
902 updatecheck = 'linear'
908 with repo.wlock():
903 with repo.wlock():
909 movemarkfrom = None
904 movemarkfrom = None
910 warndest = False
905 warndest = False
911 if checkout is None:
906 if checkout is None:
912 updata = destutil.destupdate(repo, clean=clean)
907 updata = destutil.destupdate(repo, clean=clean)
913 checkout, movemarkfrom, brev = updata
908 checkout, movemarkfrom, brev = updata
914 warndest = True
909 warndest = True
915
910
916 if clean:
911 if clean:
917 ret = _clean(repo, checkout)
912 ret = _clean(repo, checkout)
918 else:
913 else:
919 if updatecheck == 'abort':
914 if updatecheck == 'abort':
920 cmdutil.bailifchanged(repo, merge=False)
915 cmdutil.bailifchanged(repo, merge=False)
921 updatecheck = 'none'
916 updatecheck = 'none'
922 ret = _update(repo, checkout, updatecheck=updatecheck)
917 ret = _update(repo, checkout, updatecheck=updatecheck)
923
918
924 if not ret and movemarkfrom:
919 if not ret and movemarkfrom:
925 if movemarkfrom == repo['.'].node():
920 if movemarkfrom == repo['.'].node():
926 pass # no-op update
921 pass # no-op update
927 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
922 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
928 b = ui.label(repo._activebookmark, 'bookmarks.active')
923 b = ui.label(repo._activebookmark, 'bookmarks.active')
929 ui.status(_("updating bookmark %s\n") % b)
924 ui.status(_("updating bookmark %s\n") % b)
930 else:
925 else:
931 # this can happen with a non-linear update
926 # this can happen with a non-linear update
932 b = ui.label(repo._activebookmark, 'bookmarks')
927 b = ui.label(repo._activebookmark, 'bookmarks')
933 ui.status(_("(leaving bookmark %s)\n") % b)
928 ui.status(_("(leaving bookmark %s)\n") % b)
934 bookmarks.deactivate(repo)
929 bookmarks.deactivate(repo)
935 elif brev in repo._bookmarks:
930 elif brev in repo._bookmarks:
936 if brev != repo._activebookmark:
931 if brev != repo._activebookmark:
937 b = ui.label(brev, 'bookmarks.active')
932 b = ui.label(brev, 'bookmarks.active')
938 ui.status(_("(activating bookmark %s)\n") % b)
933 ui.status(_("(activating bookmark %s)\n") % b)
939 bookmarks.activate(repo, brev)
934 bookmarks.activate(repo, brev)
940 elif brev:
935 elif brev:
941 if repo._activebookmark:
936 if repo._activebookmark:
942 b = ui.label(repo._activebookmark, 'bookmarks')
937 b = ui.label(repo._activebookmark, 'bookmarks')
943 ui.status(_("(leaving bookmark %s)\n") % b)
938 ui.status(_("(leaving bookmark %s)\n") % b)
944 bookmarks.deactivate(repo)
939 bookmarks.deactivate(repo)
945
940
946 if warndest:
941 if warndest:
947 destutil.statusotherdests(ui, repo)
942 destutil.statusotherdests(ui, repo)
948
943
949 return ret
944 return ret
950
945
951 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
946 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
952 abort=False):
947 abort=False):
953 """Branch merge with node, resolving changes. Return true if any
948 """Branch merge with node, resolving changes. Return true if any
954 unresolved conflicts."""
949 unresolved conflicts."""
955 if not abort:
950 if not abort:
956 stats = mergemod.update(repo, node, branchmerge=True, force=force,
951 stats = mergemod.update(repo, node, branchmerge=True, force=force,
957 mergeforce=mergeforce, labels=labels)
952 mergeforce=mergeforce, labels=labels)
958 else:
953 else:
959 ms = mergemod.mergestate.read(repo)
954 ms = mergemod.mergestate.read(repo)
960 if ms.active():
955 if ms.active():
961 # there were conflicts
956 # there were conflicts
962 node = ms.localctx.hex()
957 node = ms.localctx.hex()
963 else:
958 else:
964 # there were no conficts, mergestate was not stored
959 # there were no conficts, mergestate was not stored
965 node = repo['.'].hex()
960 node = repo['.'].hex()
966
961
967 repo.ui.status(_("aborting the merge, updating back to"
962 repo.ui.status(_("aborting the merge, updating back to"
968 " %s\n") % node[:12])
963 " %s\n") % node[:12])
969 stats = mergemod.update(repo, node, branchmerge=False, force=True,
964 stats = mergemod.update(repo, node, branchmerge=False, force=True,
970 labels=labels)
965 labels=labels)
971
966
972 _showstats(repo, stats)
967 _showstats(repo, stats)
973 if stats.unresolvedcount:
968 if stats.unresolvedcount:
974 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
969 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
975 "or 'hg merge --abort' to abandon\n"))
970 "or 'hg merge --abort' to abandon\n"))
976 elif remind and not abort:
971 elif remind and not abort:
977 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
972 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
978 return stats.unresolvedcount > 0
973 return stats.unresolvedcount > 0
979
974
980 def _incoming(displaychlist, subreporecurse, ui, repo, source,
975 def _incoming(displaychlist, subreporecurse, ui, repo, source,
981 opts, buffered=False):
976 opts, buffered=False):
982 """
977 """
983 Helper for incoming / gincoming.
978 Helper for incoming / gincoming.
984 displaychlist gets called with
979 displaychlist gets called with
985 (remoterepo, incomingchangesetlist, displayer) parameters,
980 (remoterepo, incomingchangesetlist, displayer) parameters,
986 and is supposed to contain only code that can't be unified.
981 and is supposed to contain only code that can't be unified.
987 """
982 """
988 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
983 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
989 other = peer(repo, opts, source)
984 other = peer(repo, opts, source)
990 ui.status(_('comparing with %s\n') % util.hidepassword(source))
985 ui.status(_('comparing with %s\n') % util.hidepassword(source))
991 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
986 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
992
987
993 if revs:
988 if revs:
994 revs = [other.lookup(rev) for rev in revs]
989 revs = [other.lookup(rev) for rev in revs]
995 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
990 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
996 revs, opts["bundle"], opts["force"])
991 revs, opts["bundle"], opts["force"])
997 try:
992 try:
998 if not chlist:
993 if not chlist:
999 ui.status(_("no changes found\n"))
994 ui.status(_("no changes found\n"))
1000 return subreporecurse()
995 return subreporecurse()
1001 ui.pager('incoming')
996 ui.pager('incoming')
1002 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
997 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
1003 buffered=buffered)
998 buffered=buffered)
1004 displaychlist(other, chlist, displayer)
999 displaychlist(other, chlist, displayer)
1005 displayer.close()
1000 displayer.close()
1006 finally:
1001 finally:
1007 cleanupfn()
1002 cleanupfn()
1008 subreporecurse()
1003 subreporecurse()
1009 return 0 # exit code is zero since we found incoming changes
1004 return 0 # exit code is zero since we found incoming changes
1010
1005
1011 def incoming(ui, repo, source, opts):
1006 def incoming(ui, repo, source, opts):
1012 def subreporecurse():
1007 def subreporecurse():
1013 ret = 1
1008 ret = 1
1014 if opts.get('subrepos'):
1009 if opts.get('subrepos'):
1015 ctx = repo[None]
1010 ctx = repo[None]
1016 for subpath in sorted(ctx.substate):
1011 for subpath in sorted(ctx.substate):
1017 sub = ctx.sub(subpath)
1012 sub = ctx.sub(subpath)
1018 ret = min(ret, sub.incoming(ui, source, opts))
1013 ret = min(ret, sub.incoming(ui, source, opts))
1019 return ret
1014 return ret
1020
1015
1021 def display(other, chlist, displayer):
1016 def display(other, chlist, displayer):
1022 limit = logcmdutil.getlimit(opts)
1017 limit = logcmdutil.getlimit(opts)
1023 if opts.get('newest_first'):
1018 if opts.get('newest_first'):
1024 chlist.reverse()
1019 chlist.reverse()
1025 count = 0
1020 count = 0
1026 for n in chlist:
1021 for n in chlist:
1027 if limit is not None and count >= limit:
1022 if limit is not None and count >= limit:
1028 break
1023 break
1029 parents = [p for p in other.changelog.parents(n) if p != nullid]
1024 parents = [p for p in other.changelog.parents(n) if p != nullid]
1030 if opts.get('no_merges') and len(parents) == 2:
1025 if opts.get('no_merges') and len(parents) == 2:
1031 continue
1026 continue
1032 count += 1
1027 count += 1
1033 displayer.show(other[n])
1028 displayer.show(other[n])
1034 return _incoming(display, subreporecurse, ui, repo, source, opts)
1029 return _incoming(display, subreporecurse, ui, repo, source, opts)
1035
1030
1036 def _outgoing(ui, repo, dest, opts):
1031 def _outgoing(ui, repo, dest, opts):
1037 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1032 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1038 if not path:
1033 if not path:
1039 raise error.Abort(_('default repository not configured!'),
1034 raise error.Abort(_('default repository not configured!'),
1040 hint=_("see 'hg help config.paths'"))
1035 hint=_("see 'hg help config.paths'"))
1041 dest = path.pushloc or path.loc
1036 dest = path.pushloc or path.loc
1042 branches = path.branch, opts.get('branch') or []
1037 branches = path.branch, opts.get('branch') or []
1043
1038
1044 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1039 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1045 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1040 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1046 if revs:
1041 if revs:
1047 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1042 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1048
1043
1049 other = peer(repo, opts, dest)
1044 other = peer(repo, opts, dest)
1050 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1045 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1051 force=opts.get('force'))
1046 force=opts.get('force'))
1052 o = outgoing.missing
1047 o = outgoing.missing
1053 if not o:
1048 if not o:
1054 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1049 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1055 return o, other
1050 return o, other
1056
1051
1057 def outgoing(ui, repo, dest, opts):
1052 def outgoing(ui, repo, dest, opts):
1058 def recurse():
1053 def recurse():
1059 ret = 1
1054 ret = 1
1060 if opts.get('subrepos'):
1055 if opts.get('subrepos'):
1061 ctx = repo[None]
1056 ctx = repo[None]
1062 for subpath in sorted(ctx.substate):
1057 for subpath in sorted(ctx.substate):
1063 sub = ctx.sub(subpath)
1058 sub = ctx.sub(subpath)
1064 ret = min(ret, sub.outgoing(ui, dest, opts))
1059 ret = min(ret, sub.outgoing(ui, dest, opts))
1065 return ret
1060 return ret
1066
1061
1067 limit = logcmdutil.getlimit(opts)
1062 limit = logcmdutil.getlimit(opts)
1068 o, other = _outgoing(ui, repo, dest, opts)
1063 o, other = _outgoing(ui, repo, dest, opts)
1069 if not o:
1064 if not o:
1070 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1065 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1071 return recurse()
1066 return recurse()
1072
1067
1073 if opts.get('newest_first'):
1068 if opts.get('newest_first'):
1074 o.reverse()
1069 o.reverse()
1075 ui.pager('outgoing')
1070 ui.pager('outgoing')
1076 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1071 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1077 count = 0
1072 count = 0
1078 for n in o:
1073 for n in o:
1079 if limit is not None and count >= limit:
1074 if limit is not None and count >= limit:
1080 break
1075 break
1081 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1076 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1082 if opts.get('no_merges') and len(parents) == 2:
1077 if opts.get('no_merges') and len(parents) == 2:
1083 continue
1078 continue
1084 count += 1
1079 count += 1
1085 displayer.show(repo[n])
1080 displayer.show(repo[n])
1086 displayer.close()
1081 displayer.close()
1087 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1082 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1088 recurse()
1083 recurse()
1089 return 0 # exit code is zero since we found outgoing changes
1084 return 0 # exit code is zero since we found outgoing changes
1090
1085
1091 def verify(repo):
1086 def verify(repo):
1092 """verify the consistency of a repository"""
1087 """verify the consistency of a repository"""
1093 ret = verifymod.verify(repo)
1088 ret = verifymod.verify(repo)
1094
1089
1095 # Broken subrepo references in hidden csets don't seem worth worrying about,
1090 # Broken subrepo references in hidden csets don't seem worth worrying about,
1096 # since they can't be pushed/pulled, and --hidden can be used if they are a
1091 # since they can't be pushed/pulled, and --hidden can be used if they are a
1097 # concern.
1092 # concern.
1098
1093
1099 # pathto() is needed for -R case
1094 # pathto() is needed for -R case
1100 revs = repo.revs("filelog(%s)",
1095 revs = repo.revs("filelog(%s)",
1101 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1096 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1102
1097
1103 if revs:
1098 if revs:
1104 repo.ui.status(_('checking subrepo links\n'))
1099 repo.ui.status(_('checking subrepo links\n'))
1105 for rev in revs:
1100 for rev in revs:
1106 ctx = repo[rev]
1101 ctx = repo[rev]
1107 try:
1102 try:
1108 for subpath in ctx.substate:
1103 for subpath in ctx.substate:
1109 try:
1104 try:
1110 ret = (ctx.sub(subpath, allowcreate=False).verify()
1105 ret = (ctx.sub(subpath, allowcreate=False).verify()
1111 or ret)
1106 or ret)
1112 except error.RepoError as e:
1107 except error.RepoError as e:
1113 repo.ui.warn(('%d: %s\n') % (rev, e))
1108 repo.ui.warn(('%d: %s\n') % (rev, e))
1114 except Exception:
1109 except Exception:
1115 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1110 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1116 node.short(ctx.node()))
1111 node.short(ctx.node()))
1117
1112
1118 return ret
1113 return ret
1119
1114
1120 def remoteui(src, opts):
1115 def remoteui(src, opts):
1121 'build a remote ui from ui or repo and opts'
1116 'build a remote ui from ui or repo and opts'
1122 if util.safehasattr(src, 'baseui'): # looks like a repository
1117 if util.safehasattr(src, 'baseui'): # looks like a repository
1123 dst = src.baseui.copy() # drop repo-specific config
1118 dst = src.baseui.copy() # drop repo-specific config
1124 src = src.ui # copy target options from repo
1119 src = src.ui # copy target options from repo
1125 else: # assume it's a global ui object
1120 else: # assume it's a global ui object
1126 dst = src.copy() # keep all global options
1121 dst = src.copy() # keep all global options
1127
1122
1128 # copy ssh-specific options
1123 # copy ssh-specific options
1129 for o in 'ssh', 'remotecmd':
1124 for o in 'ssh', 'remotecmd':
1130 v = opts.get(o) or src.config('ui', o)
1125 v = opts.get(o) or src.config('ui', o)
1131 if v:
1126 if v:
1132 dst.setconfig("ui", o, v, 'copied')
1127 dst.setconfig("ui", o, v, 'copied')
1133
1128
1134 # copy bundle-specific options
1129 # copy bundle-specific options
1135 r = src.config('bundle', 'mainreporoot')
1130 r = src.config('bundle', 'mainreporoot')
1136 if r:
1131 if r:
1137 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1132 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1138
1133
1139 # copy selected local settings to the remote ui
1134 # copy selected local settings to the remote ui
1140 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1135 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1141 for key, val in src.configitems(sect):
1136 for key, val in src.configitems(sect):
1142 dst.setconfig(sect, key, val, 'copied')
1137 dst.setconfig(sect, key, val, 'copied')
1143 v = src.config('web', 'cacerts')
1138 v = src.config('web', 'cacerts')
1144 if v:
1139 if v:
1145 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1140 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1146
1141
1147 return dst
1142 return dst
1148
1143
1149 # Files of interest
1144 # Files of interest
1150 # Used to check if the repository has changed looking at mtime and size of
1145 # Used to check if the repository has changed looking at mtime and size of
1151 # these files.
1146 # these files.
1152 foi = [('spath', '00changelog.i'),
1147 foi = [('spath', '00changelog.i'),
1153 ('spath', 'phaseroots'), # ! phase can change content at the same size
1148 ('spath', 'phaseroots'), # ! phase can change content at the same size
1154 ('spath', 'obsstore'),
1149 ('spath', 'obsstore'),
1155 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1150 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1156 ]
1151 ]
1157
1152
1158 class cachedlocalrepo(object):
1153 class cachedlocalrepo(object):
1159 """Holds a localrepository that can be cached and reused."""
1154 """Holds a localrepository that can be cached and reused."""
1160
1155
1161 def __init__(self, repo):
1156 def __init__(self, repo):
1162 """Create a new cached repo from an existing repo.
1157 """Create a new cached repo from an existing repo.
1163
1158
1164 We assume the passed in repo was recently created. If the
1159 We assume the passed in repo was recently created. If the
1165 repo has changed between when it was created and when it was
1160 repo has changed between when it was created and when it was
1166 turned into a cache, it may not refresh properly.
1161 turned into a cache, it may not refresh properly.
1167 """
1162 """
1168 assert isinstance(repo, localrepo.localrepository)
1163 assert isinstance(repo, localrepo.localrepository)
1169 self._repo = repo
1164 self._repo = repo
1170 self._state, self.mtime = self._repostate()
1165 self._state, self.mtime = self._repostate()
1171 self._filtername = repo.filtername
1166 self._filtername = repo.filtername
1172
1167
1173 def fetch(self):
1168 def fetch(self):
1174 """Refresh (if necessary) and return a repository.
1169 """Refresh (if necessary) and return a repository.
1175
1170
1176 If the cached instance is out of date, it will be recreated
1171 If the cached instance is out of date, it will be recreated
1177 automatically and returned.
1172 automatically and returned.
1178
1173
1179 Returns a tuple of the repo and a boolean indicating whether a new
1174 Returns a tuple of the repo and a boolean indicating whether a new
1180 repo instance was created.
1175 repo instance was created.
1181 """
1176 """
1182 # We compare the mtimes and sizes of some well-known files to
1177 # We compare the mtimes and sizes of some well-known files to
1183 # determine if the repo changed. This is not precise, as mtimes
1178 # determine if the repo changed. This is not precise, as mtimes
1184 # are susceptible to clock skew and imprecise filesystems and
1179 # are susceptible to clock skew and imprecise filesystems and
1185 # file content can change while maintaining the same size.
1180 # file content can change while maintaining the same size.
1186
1181
1187 state, mtime = self._repostate()
1182 state, mtime = self._repostate()
1188 if state == self._state:
1183 if state == self._state:
1189 return self._repo, False
1184 return self._repo, False
1190
1185
1191 repo = repository(self._repo.baseui, self._repo.url())
1186 repo = repository(self._repo.baseui, self._repo.url())
1192 if self._filtername:
1187 if self._filtername:
1193 self._repo = repo.filtered(self._filtername)
1188 self._repo = repo.filtered(self._filtername)
1194 else:
1189 else:
1195 self._repo = repo.unfiltered()
1190 self._repo = repo.unfiltered()
1196 self._state = state
1191 self._state = state
1197 self.mtime = mtime
1192 self.mtime = mtime
1198
1193
1199 return self._repo, True
1194 return self._repo, True
1200
1195
1201 def _repostate(self):
1196 def _repostate(self):
1202 state = []
1197 state = []
1203 maxmtime = -1
1198 maxmtime = -1
1204 for attr, fname in foi:
1199 for attr, fname in foi:
1205 prefix = getattr(self._repo, attr)
1200 prefix = getattr(self._repo, attr)
1206 p = os.path.join(prefix, fname)
1201 p = os.path.join(prefix, fname)
1207 try:
1202 try:
1208 st = os.stat(p)
1203 st = os.stat(p)
1209 except OSError:
1204 except OSError:
1210 st = os.stat(prefix)
1205 st = os.stat(prefix)
1211 state.append((st[stat.ST_MTIME], st.st_size))
1206 state.append((st[stat.ST_MTIME], st.st_size))
1212 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1207 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1213
1208
1214 return tuple(state), maxmtime
1209 return tuple(state), maxmtime
1215
1210
1216 def copy(self):
1211 def copy(self):
1217 """Obtain a copy of this class instance.
1212 """Obtain a copy of this class instance.
1218
1213
1219 A new localrepository instance is obtained. The new instance should be
1214 A new localrepository instance is obtained. The new instance should be
1220 completely independent of the original.
1215 completely independent of the original.
1221 """
1216 """
1222 repo = repository(self._repo.baseui, self._repo.origroot)
1217 repo = repository(self._repo.baseui, self._repo.origroot)
1223 if self._filtername:
1218 if self._filtername:
1224 repo = repo.filtered(self._filtername)
1219 repo = repo.filtered(self._filtername)
1225 else:
1220 else:
1226 repo = repo.unfiltered()
1221 repo = repo.unfiltered()
1227 c = cachedlocalrepo(repo)
1222 c = cachedlocalrepo(repo)
1228 c._state = self._state
1223 c._state = self._state
1229 c.mtime = self.mtime
1224 c.mtime = self.mtime
1230 return c
1225 return c
General Comments 0
You need to be logged in to leave comments. Login now