##// END OF EJS Templates
abort: removed labels argument from abortmerge()...
Taapas Agrawal -
r42810:209f2b8a default
parent child Browse files
Show More
@@ -1,1238 +1,1237 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 nullid,
19 nullid,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 bookmarks,
23 bookmarks,
24 bundlerepo,
24 bundlerepo,
25 cacheutil,
25 cacheutil,
26 cmdutil,
26 cmdutil,
27 destutil,
27 destutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 extensions,
31 extensions,
32 httppeer,
32 httppeer,
33 localrepo,
33 localrepo,
34 lock,
34 lock,
35 logcmdutil,
35 logcmdutil,
36 logexchange,
36 logexchange,
37 merge as mergemod,
37 merge as mergemod,
38 narrowspec,
38 narrowspec,
39 node,
39 node,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repository as repositorymod,
42 repository as repositorymod,
43 scmutil,
43 scmutil,
44 sshpeer,
44 sshpeer,
45 statichttprepo,
45 statichttprepo,
46 ui as uimod,
46 ui as uimod,
47 unionrepo,
47 unionrepo,
48 url,
48 url,
49 util,
49 util,
50 verify as verifymod,
50 verify as verifymod,
51 vfs as vfsmod,
51 vfs as vfsmod,
52 )
52 )
53
53
# convenience alias so callers can release a group of locks at once
release = lock.release

# shared features
sharedbookmarks = 'bookmarks'
58
58
def _local(path):
    """Return the repo module to use for a local path.

    Picks ``bundlerepo`` when *path* points at a bundle file, otherwise
    ``localrepo``.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(_('invalid path %s: %s') % (
            path, pycompat.bytestr(e)))

    return isfile and bundlerepo or localrepo
70
70
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from *branches* into revisions on *other*.

    ``branches`` is a ``(hashbranch, branches)`` pair as returned by
    ``parseurl``.  Returns ``(revs, checkoutrev)`` where ``checkoutrev`` is
    the revision to check out after a clone (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # resolve '.' to the dirstate branch of the local repo, then add
        # all heads of the branch (tip-most first); True if branch exists
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name; treat it as a raw revision/hash
            revs.append(hashbranch)
    return revs, revs[0]
113
113
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        # the '#branch' fragment is hg-specific; strip it from the URL
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
123
123
# map of URL scheme -> module (or callable returning one) that can open it
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
133
133
def _peerlookup(path):
    """Return the scheme handler (module or object) for *path*."""
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
146
146
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # handler has no islocal(); treat as non-local
            return False
    return repo.local()
155
155
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
163
163
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
166
166
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # prefer the repo's own ui (it may carry repo-level configuration)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b' > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own setup hooks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
190
190
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    # hide changesets that should not be visible (e.g. obsolete ones)
    return repo.filtered('visible')
201
201
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()
207
207
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))
228
228
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached on a previous call
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
246
246
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve #branch fragment
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the new repo picks up the shared requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
282
282
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            sharefile = repo.vfs.join('sharedpath')
            util.rename(sharefile, sharefile + '.old')

            repo.requirements.discard('shared')
            repo.requirements.discard('relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
324
324
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
342
342
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to 'default' and 'tip'
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
363
363
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo treats everything as public, so the
                # phaseroots file is meaningless in the copy
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise
401
401
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
472
472
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)
483
483
484 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
484 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
485 update=True, stream=False, branch=None, shareopts=None,
485 update=True, stream=False, branch=None, shareopts=None,
486 storeincludepats=None, storeexcludepats=None, depth=None):
486 storeincludepats=None, storeexcludepats=None, depth=None):
487 """Make a copy of an existing repository.
487 """Make a copy of an existing repository.
488
488
489 Create a copy of an existing repository in a new directory. The
489 Create a copy of an existing repository in a new directory. The
490 source and destination are URLs, as passed to the repository
490 source and destination are URLs, as passed to the repository
491 function. Returns a pair of repository peers, the source and
491 function. Returns a pair of repository peers, the source and
492 newly created destination.
492 newly created destination.
493
493
494 The location of the source is added to the new repository's
494 The location of the source is added to the new repository's
495 .hg/hgrc file, as the default to be used for future pulls and
495 .hg/hgrc file, as the default to be used for future pulls and
496 pushes.
496 pushes.
497
497
498 If an exception is raised, the partly cloned/updated destination
498 If an exception is raised, the partly cloned/updated destination
499 repository will be deleted.
499 repository will be deleted.
500
500
501 Arguments:
501 Arguments:
502
502
503 source: repository object or URL
503 source: repository object or URL
504
504
505 dest: URL of destination repository to create (defaults to base
505 dest: URL of destination repository to create (defaults to base
506 name of source repository)
506 name of source repository)
507
507
508 pull: always pull from source repository, even in local case or if the
508 pull: always pull from source repository, even in local case or if the
509 server prefers streaming
509 server prefers streaming
510
510
511 stream: stream raw data uncompressed from repository (fast over
511 stream: stream raw data uncompressed from repository (fast over
512 LAN, slow over WAN)
512 LAN, slow over WAN)
513
513
514 revs: revision to clone up to (implies pull=True)
514 revs: revision to clone up to (implies pull=True)
515
515
516 update: update working directory after clone completes, if
516 update: update working directory after clone completes, if
517 destination is local repository (True means update to default rev,
517 destination is local repository (True means update to default rev,
518 anything else is treated as a revision)
518 anything else is treated as a revision)
519
519
520 branch: branches to clone
520 branch: branches to clone
521
521
522 shareopts: dict of options to control auto sharing behavior. The "pool" key
522 shareopts: dict of options to control auto sharing behavior. The "pool" key
523 activates auto sharing mode and defines the directory for stores. The
523 activates auto sharing mode and defines the directory for stores. The
524 "mode" key determines how to construct the directory name of the shared
524 "mode" key determines how to construct the directory name of the shared
525 repository. "identity" means the name is derived from the node of the first
525 repository. "identity" means the name is derived from the node of the first
526 changeset in the repository. "remote" means the name is derived from the
526 changeset in the repository. "remote" means the name is derived from the
527 remote's path/URL. Defaults to "identity."
527 remote's path/URL. Defaults to "identity."
528
528
529 storeincludepats and storeexcludepats: sets of file patterns to include and
529 storeincludepats and storeexcludepats: sets of file patterns to include and
530 exclude in the repository copy, respectively. If not defined, all files
530 exclude in the repository copy, respectively. If not defined, all files
531 will be included (a "full" clone). Otherwise a "narrow" clone containing
531 will be included (a "full" clone). Otherwise a "narrow" clone containing
532 only the requested files will be performed. If ``storeincludepats`` is not
532 only the requested files will be performed. If ``storeincludepats`` is not
533 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
533 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
534 ``path:.``. If both are empty sets, no files will be cloned.
534 ``path:.``. If both are empty sets, no files will be cloned.
535 """
535 """
536
536
537 if isinstance(source, bytes):
537 if isinstance(source, bytes):
538 origsource = ui.expandpath(source)
538 origsource = ui.expandpath(source)
539 source, branches = parseurl(origsource, branch)
539 source, branches = parseurl(origsource, branch)
540 srcpeer = peer(ui, peeropts, source)
540 srcpeer = peer(ui, peeropts, source)
541 else:
541 else:
542 srcpeer = source.peer() # in case we were called with a localrepo
542 srcpeer = source.peer() # in case we were called with a localrepo
543 branches = (None, branch or [])
543 branches = (None, branch or [])
544 origsource = source = srcpeer.url()
544 origsource = source = srcpeer.url()
545 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
545 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
546
546
547 if dest is None:
547 if dest is None:
548 dest = defaultdest(source)
548 dest = defaultdest(source)
549 if dest:
549 if dest:
550 ui.status(_("destination directory: %s\n") % dest)
550 ui.status(_("destination directory: %s\n") % dest)
551 else:
551 else:
552 dest = ui.expandpath(dest)
552 dest = ui.expandpath(dest)
553
553
554 dest = util.urllocalpath(dest)
554 dest = util.urllocalpath(dest)
555 source = util.urllocalpath(source)
555 source = util.urllocalpath(source)
556
556
557 if not dest:
557 if not dest:
558 raise error.Abort(_("empty destination path is not valid"))
558 raise error.Abort(_("empty destination path is not valid"))
559
559
560 destvfs = vfsmod.vfs(dest, expandpath=True)
560 destvfs = vfsmod.vfs(dest, expandpath=True)
561 if destvfs.lexists():
561 if destvfs.lexists():
562 if not destvfs.isdir():
562 if not destvfs.isdir():
563 raise error.Abort(_("destination '%s' already exists") % dest)
563 raise error.Abort(_("destination '%s' already exists") % dest)
564 elif destvfs.listdir():
564 elif destvfs.listdir():
565 raise error.Abort(_("destination '%s' is not empty") % dest)
565 raise error.Abort(_("destination '%s' is not empty") % dest)
566
566
567 createopts = {}
567 createopts = {}
568 narrow = False
568 narrow = False
569
569
570 if storeincludepats is not None:
570 if storeincludepats is not None:
571 narrowspec.validatepatterns(storeincludepats)
571 narrowspec.validatepatterns(storeincludepats)
572 narrow = True
572 narrow = True
573
573
574 if storeexcludepats is not None:
574 if storeexcludepats is not None:
575 narrowspec.validatepatterns(storeexcludepats)
575 narrowspec.validatepatterns(storeexcludepats)
576 narrow = True
576 narrow = True
577
577
578 if narrow:
578 if narrow:
579 # Include everything by default if only exclusion patterns defined.
579 # Include everything by default if only exclusion patterns defined.
580 if storeexcludepats and not storeincludepats:
580 if storeexcludepats and not storeincludepats:
581 storeincludepats = {'path:.'}
581 storeincludepats = {'path:.'}
582
582
583 createopts['narrowfiles'] = True
583 createopts['narrowfiles'] = True
584
584
585 if depth:
585 if depth:
586 createopts['shallowfilestore'] = True
586 createopts['shallowfilestore'] = True
587
587
588 if srcpeer.capable(b'lfs-serve'):
588 if srcpeer.capable(b'lfs-serve'):
589 # Repository creation honors the config if it disabled the extension, so
589 # Repository creation honors the config if it disabled the extension, so
590 # we can't just announce that lfs will be enabled. This check avoids
590 # we can't just announce that lfs will be enabled. This check avoids
591 # saying that lfs will be enabled, and then saying it's an unknown
591 # saying that lfs will be enabled, and then saying it's an unknown
592 # feature. The lfs creation option is set in either case so that a
592 # feature. The lfs creation option is set in either case so that a
593 # requirement is added. If the extension is explicitly disabled but the
593 # requirement is added. If the extension is explicitly disabled but the
594 # requirement is set, the clone aborts early, before transferring any
594 # requirement is set, the clone aborts early, before transferring any
595 # data.
595 # data.
596 createopts['lfs'] = True
596 createopts['lfs'] = True
597
597
598 if extensions.disabledext('lfs'):
598 if extensions.disabledext('lfs'):
599 ui.status(_('(remote is using large file support (lfs), but it is '
599 ui.status(_('(remote is using large file support (lfs), but it is '
600 'explicitly disabled in the local configuration)\n'))
600 'explicitly disabled in the local configuration)\n'))
601 else:
601 else:
602 ui.status(_('(remote is using large file support (lfs); lfs will '
602 ui.status(_('(remote is using large file support (lfs); lfs will '
603 'be enabled for this repository)\n'))
603 'be enabled for this repository)\n'))
604
604
605 shareopts = shareopts or {}
605 shareopts = shareopts or {}
606 sharepool = shareopts.get('pool')
606 sharepool = shareopts.get('pool')
607 sharenamemode = shareopts.get('mode')
607 sharenamemode = shareopts.get('mode')
608 if sharepool and islocal(dest):
608 if sharepool and islocal(dest):
609 sharepath = None
609 sharepath = None
610 if sharenamemode == 'identity':
610 if sharenamemode == 'identity':
611 # Resolve the name from the initial changeset in the remote
611 # Resolve the name from the initial changeset in the remote
612 # repository. This returns nullid when the remote is empty. It
612 # repository. This returns nullid when the remote is empty. It
613 # raises RepoLookupError if revision 0 is filtered or otherwise
613 # raises RepoLookupError if revision 0 is filtered or otherwise
614 # not available. If we fail to resolve, sharing is not enabled.
614 # not available. If we fail to resolve, sharing is not enabled.
615 try:
615 try:
616 with srcpeer.commandexecutor() as e:
616 with srcpeer.commandexecutor() as e:
617 rootnode = e.callcommand('lookup', {
617 rootnode = e.callcommand('lookup', {
618 'key': '0',
618 'key': '0',
619 }).result()
619 }).result()
620
620
621 if rootnode != node.nullid:
621 if rootnode != node.nullid:
622 sharepath = os.path.join(sharepool, node.hex(rootnode))
622 sharepath = os.path.join(sharepool, node.hex(rootnode))
623 else:
623 else:
624 ui.status(_('(not using pooled storage: '
624 ui.status(_('(not using pooled storage: '
625 'remote appears to be empty)\n'))
625 'remote appears to be empty)\n'))
626 except error.RepoLookupError:
626 except error.RepoLookupError:
627 ui.status(_('(not using pooled storage: '
627 ui.status(_('(not using pooled storage: '
628 'unable to resolve identity of remote)\n'))
628 'unable to resolve identity of remote)\n'))
629 elif sharenamemode == 'remote':
629 elif sharenamemode == 'remote':
630 sharepath = os.path.join(
630 sharepath = os.path.join(
631 sharepool, node.hex(hashlib.sha1(source).digest()))
631 sharepool, node.hex(hashlib.sha1(source).digest()))
632 else:
632 else:
633 raise error.Abort(_('unknown share naming mode: %s') %
633 raise error.Abort(_('unknown share naming mode: %s') %
634 sharenamemode)
634 sharenamemode)
635
635
636 # TODO this is a somewhat arbitrary restriction.
636 # TODO this is a somewhat arbitrary restriction.
637 if narrow:
637 if narrow:
638 ui.status(_('(pooled storage not supported for narrow clones)\n'))
638 ui.status(_('(pooled storage not supported for narrow clones)\n'))
639 sharepath = None
639 sharepath = None
640
640
641 if sharepath:
641 if sharepath:
642 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
642 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
643 dest, pull=pull, rev=revs, update=update,
643 dest, pull=pull, rev=revs, update=update,
644 stream=stream)
644 stream=stream)
645
645
646 srclock = destlock = cleandir = None
646 srclock = destlock = cleandir = None
647 srcrepo = srcpeer.local()
647 srcrepo = srcpeer.local()
648 try:
648 try:
649 abspath = origsource
649 abspath = origsource
650 if islocal(origsource):
650 if islocal(origsource):
651 abspath = os.path.abspath(util.urllocalpath(origsource))
651 abspath = os.path.abspath(util.urllocalpath(origsource))
652
652
653 if islocal(dest):
653 if islocal(dest):
654 cleandir = dest
654 cleandir = dest
655
655
656 copy = False
656 copy = False
657 if (srcrepo and srcrepo.cancopy() and islocal(dest)
657 if (srcrepo and srcrepo.cancopy() and islocal(dest)
658 and not phases.hassecret(srcrepo)):
658 and not phases.hassecret(srcrepo)):
659 copy = not pull and not revs
659 copy = not pull and not revs
660
660
661 # TODO this is a somewhat arbitrary restriction.
661 # TODO this is a somewhat arbitrary restriction.
662 if narrow:
662 if narrow:
663 copy = False
663 copy = False
664
664
665 if copy:
665 if copy:
666 try:
666 try:
667 # we use a lock here because if we race with commit, we
667 # we use a lock here because if we race with commit, we
668 # can end up with extra data in the cloned revlogs that's
668 # can end up with extra data in the cloned revlogs that's
669 # not pointed to by changesets, thus causing verify to
669 # not pointed to by changesets, thus causing verify to
670 # fail
670 # fail
671 srclock = srcrepo.lock(wait=False)
671 srclock = srcrepo.lock(wait=False)
672 except error.LockError:
672 except error.LockError:
673 copy = False
673 copy = False
674
674
675 if copy:
675 if copy:
676 srcrepo.hook('preoutgoing', throw=True, source='clone')
676 srcrepo.hook('preoutgoing', throw=True, source='clone')
677 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
677 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
678 if not os.path.exists(dest):
678 if not os.path.exists(dest):
679 util.makedirs(dest)
679 util.makedirs(dest)
680 else:
680 else:
681 # only clean up directories we create ourselves
681 # only clean up directories we create ourselves
682 cleandir = hgdir
682 cleandir = hgdir
683 try:
683 try:
684 destpath = hgdir
684 destpath = hgdir
685 util.makedir(destpath, notindexed=True)
685 util.makedir(destpath, notindexed=True)
686 except OSError as inst:
686 except OSError as inst:
687 if inst.errno == errno.EEXIST:
687 if inst.errno == errno.EEXIST:
688 cleandir = None
688 cleandir = None
689 raise error.Abort(_("destination '%s' already exists")
689 raise error.Abort(_("destination '%s' already exists")
690 % dest)
690 % dest)
691 raise
691 raise
692
692
693 destlock = copystore(ui, srcrepo, destpath)
693 destlock = copystore(ui, srcrepo, destpath)
694 # copy bookmarks over
694 # copy bookmarks over
695 srcbookmarks = srcrepo.vfs.join('bookmarks')
695 srcbookmarks = srcrepo.vfs.join('bookmarks')
696 dstbookmarks = os.path.join(destpath, 'bookmarks')
696 dstbookmarks = os.path.join(destpath, 'bookmarks')
697 if os.path.exists(srcbookmarks):
697 if os.path.exists(srcbookmarks):
698 util.copyfile(srcbookmarks, dstbookmarks)
698 util.copyfile(srcbookmarks, dstbookmarks)
699
699
700 dstcachedir = os.path.join(destpath, 'cache')
700 dstcachedir = os.path.join(destpath, 'cache')
701 for cache in cacheutil.cachetocopy(srcrepo):
701 for cache in cacheutil.cachetocopy(srcrepo):
702 _copycache(srcrepo, dstcachedir, cache)
702 _copycache(srcrepo, dstcachedir, cache)
703
703
704 # we need to re-init the repo after manually copying the data
704 # we need to re-init the repo after manually copying the data
705 # into it
705 # into it
706 destpeer = peer(srcrepo, peeropts, dest)
706 destpeer = peer(srcrepo, peeropts, dest)
707 srcrepo.hook('outgoing', source='clone',
707 srcrepo.hook('outgoing', source='clone',
708 node=node.hex(node.nullid))
708 node=node.hex(node.nullid))
709 else:
709 else:
710 try:
710 try:
711 # only pass ui when no srcrepo
711 # only pass ui when no srcrepo
712 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
712 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
713 createopts=createopts)
713 createopts=createopts)
714 except OSError as inst:
714 except OSError as inst:
715 if inst.errno == errno.EEXIST:
715 if inst.errno == errno.EEXIST:
716 cleandir = None
716 cleandir = None
717 raise error.Abort(_("destination '%s' already exists")
717 raise error.Abort(_("destination '%s' already exists")
718 % dest)
718 % dest)
719 raise
719 raise
720
720
721 if revs:
721 if revs:
722 if not srcpeer.capable('lookup'):
722 if not srcpeer.capable('lookup'):
723 raise error.Abort(_("src repository does not support "
723 raise error.Abort(_("src repository does not support "
724 "revision lookup and so doesn't "
724 "revision lookup and so doesn't "
725 "support clone by revision"))
725 "support clone by revision"))
726
726
727 # TODO this is batchable.
727 # TODO this is batchable.
728 remoterevs = []
728 remoterevs = []
729 for rev in revs:
729 for rev in revs:
730 with srcpeer.commandexecutor() as e:
730 with srcpeer.commandexecutor() as e:
731 remoterevs.append(e.callcommand('lookup', {
731 remoterevs.append(e.callcommand('lookup', {
732 'key': rev,
732 'key': rev,
733 }).result())
733 }).result())
734 revs = remoterevs
734 revs = remoterevs
735
735
736 checkout = revs[0]
736 checkout = revs[0]
737 else:
737 else:
738 revs = None
738 revs = None
739 local = destpeer.local()
739 local = destpeer.local()
740 if local:
740 if local:
741 if narrow:
741 if narrow:
742 with local.wlock(), local.lock():
742 with local.wlock(), local.lock():
743 local.setnarrowpats(storeincludepats, storeexcludepats)
743 local.setnarrowpats(storeincludepats, storeexcludepats)
744 narrowspec.copytoworkingcopy(local)
744 narrowspec.copytoworkingcopy(local)
745
745
746 u = util.url(abspath)
746 u = util.url(abspath)
747 defaulturl = bytes(u)
747 defaulturl = bytes(u)
748 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
748 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
749 if not stream:
749 if not stream:
750 if pull:
750 if pull:
751 stream = False
751 stream = False
752 else:
752 else:
753 stream = None
753 stream = None
754 # internal config: ui.quietbookmarkmove
754 # internal config: ui.quietbookmarkmove
755 overrides = {('ui', 'quietbookmarkmove'): True}
755 overrides = {('ui', 'quietbookmarkmove'): True}
756 with local.ui.configoverride(overrides, 'clone'):
756 with local.ui.configoverride(overrides, 'clone'):
757 exchange.pull(local, srcpeer, revs,
757 exchange.pull(local, srcpeer, revs,
758 streamclonerequested=stream,
758 streamclonerequested=stream,
759 includepats=storeincludepats,
759 includepats=storeincludepats,
760 excludepats=storeexcludepats,
760 excludepats=storeexcludepats,
761 depth=depth)
761 depth=depth)
762 elif srcrepo:
762 elif srcrepo:
763 # TODO lift restriction once exchange.push() accepts narrow
763 # TODO lift restriction once exchange.push() accepts narrow
764 # push.
764 # push.
765 if narrow:
765 if narrow:
766 raise error.Abort(_('narrow clone not available for '
766 raise error.Abort(_('narrow clone not available for '
767 'remote destinations'))
767 'remote destinations'))
768
768
769 exchange.push(srcrepo, destpeer, revs=revs,
769 exchange.push(srcrepo, destpeer, revs=revs,
770 bookmarks=srcrepo._bookmarks.keys())
770 bookmarks=srcrepo._bookmarks.keys())
771 else:
771 else:
772 raise error.Abort(_("clone from remote to remote not supported")
772 raise error.Abort(_("clone from remote to remote not supported")
773 )
773 )
774
774
775 cleandir = None
775 cleandir = None
776
776
777 destrepo = destpeer.local()
777 destrepo = destpeer.local()
778 if destrepo:
778 if destrepo:
779 template = uimod.samplehgrcs['cloned']
779 template = uimod.samplehgrcs['cloned']
780 u = util.url(abspath)
780 u = util.url(abspath)
781 u.passwd = None
781 u.passwd = None
782 defaulturl = bytes(u)
782 defaulturl = bytes(u)
783 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
783 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
784 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
784 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
785
785
786 if ui.configbool('experimental', 'remotenames'):
786 if ui.configbool('experimental', 'remotenames'):
787 logexchange.pullremotenames(destrepo, srcpeer)
787 logexchange.pullremotenames(destrepo, srcpeer)
788
788
789 if update:
789 if update:
790 if update is not True:
790 if update is not True:
791 with srcpeer.commandexecutor() as e:
791 with srcpeer.commandexecutor() as e:
792 checkout = e.callcommand('lookup', {
792 checkout = e.callcommand('lookup', {
793 'key': update,
793 'key': update,
794 }).result()
794 }).result()
795
795
796 uprev = None
796 uprev = None
797 status = None
797 status = None
798 if checkout is not None:
798 if checkout is not None:
799 # Some extensions (at least hg-git and hg-subversion) have
799 # Some extensions (at least hg-git and hg-subversion) have
800 # a peer.lookup() implementation that returns a name instead
800 # a peer.lookup() implementation that returns a name instead
801 # of a nodeid. We work around it here until we've figured
801 # of a nodeid. We work around it here until we've figured
802 # out a better solution.
802 # out a better solution.
803 if len(checkout) == 20 and checkout in destrepo:
803 if len(checkout) == 20 and checkout in destrepo:
804 uprev = checkout
804 uprev = checkout
805 elif scmutil.isrevsymbol(destrepo, checkout):
805 elif scmutil.isrevsymbol(destrepo, checkout):
806 uprev = scmutil.revsymbol(destrepo, checkout).node()
806 uprev = scmutil.revsymbol(destrepo, checkout).node()
807 else:
807 else:
808 if update is not True:
808 if update is not True:
809 try:
809 try:
810 uprev = destrepo.lookup(update)
810 uprev = destrepo.lookup(update)
811 except error.RepoLookupError:
811 except error.RepoLookupError:
812 pass
812 pass
813 if uprev is None:
813 if uprev is None:
814 try:
814 try:
815 uprev = destrepo._bookmarks['@']
815 uprev = destrepo._bookmarks['@']
816 update = '@'
816 update = '@'
817 bn = destrepo[uprev].branch()
817 bn = destrepo[uprev].branch()
818 if bn == 'default':
818 if bn == 'default':
819 status = _("updating to bookmark @\n")
819 status = _("updating to bookmark @\n")
820 else:
820 else:
821 status = (_("updating to bookmark @ on branch %s\n")
821 status = (_("updating to bookmark @ on branch %s\n")
822 % bn)
822 % bn)
823 except KeyError:
823 except KeyError:
824 try:
824 try:
825 uprev = destrepo.branchtip('default')
825 uprev = destrepo.branchtip('default')
826 except error.RepoLookupError:
826 except error.RepoLookupError:
827 uprev = destrepo.lookup('tip')
827 uprev = destrepo.lookup('tip')
828 if not status:
828 if not status:
829 bn = destrepo[uprev].branch()
829 bn = destrepo[uprev].branch()
830 status = _("updating to branch %s\n") % bn
830 status = _("updating to branch %s\n") % bn
831 destrepo.ui.status(status)
831 destrepo.ui.status(status)
832 _update(destrepo, uprev)
832 _update(destrepo, uprev)
833 if update in destrepo._bookmarks:
833 if update in destrepo._bookmarks:
834 bookmarks.activate(destrepo, update)
834 bookmarks.activate(destrepo, update)
835 finally:
835 finally:
836 release(srclock, destlock)
836 release(srclock, destlock)
837 if cleandir is not None:
837 if cleandir is not None:
838 shutil.rmtree(cleandir, True)
838 shutil.rmtree(cleandir, True)
839 if srcpeer is not None:
839 if srcpeer is not None:
840 srcpeer.close()
840 srcpeer.close()
841 return srcpeer, destpeer
841 return srcpeer, destpeer
842
842
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update statistics to repo.ui.

    If quietempty is true and the stats object reports itself empty,
    nothing is printed at all.
    """
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % counts)
850
850
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, uncommitted changes are clobbered; otherwise
    they are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # non-merge updates always label the two sides the same way
    labels = ['working copy', 'destination']
    return mergemod.update(repo, node, branchmerge=False,
                           force=overwrite, updatecheck=updatecheck,
                           labels=labels)
860
860
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to node.

    Returns True if any file merges were left unresolved.
    """
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved

# clone() shadows the name 'update' locally; keep a module-level alias
_update = update
871
871
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to node, clobbering changes.

    Returns True if any file merges were left unresolved.
    """
    stats = updaterepo(repo, node, True)
    # drop any leftover graft state file; it is meaningless after a
    # forced update (missing file is fine)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    unresolved = stats.unresolvedcount > 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return unresolved

# updatetotally() shadows the name 'clean' locally; keep a module-level alias
_clean = clean
882
882
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
              changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    # hold the wlock across the update and the bookmark (de)activation
    # below so they are not interleaved with other working-copy writers
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: ask destutil to pick one; it also
            # returns a node to move the active bookmark from
            # (movemarkfrom) and a name to (re)activate afterwards (brev)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # bail out now on a dirty working directory; after this
                # check no further dirtiness checking is needed
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded: try to advance the active bookmark to the
            # new working directory parent
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # the requested name is not a bookmark: deactivate whatever
            # bookmark was active, announcing it if there was one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # destination was auto-picked: mention other candidate
            # destinations to the user
            destutil.statusotherdests(ui, repo)

    return ret
954
954
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts.

    When abort is true, the in-progress merge is abandoned instead (node
    and the merge-related flags are ignored in that case).
    """
    if abort:
        # abortmerge() no longer accepts a labels argument, so it is not
        # forwarded here
        return abortmerge(repo.ui, repo)

    stats = mergemod.update(repo, node, branchmerge=True, force=force,
                            mergeforce=mergeforce, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
971
971
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the first parent.

    If a merge state is active (conflicts were recorded), return to its
    local side; otherwise fall back to the working directory parent.
    Returns True if any files were left unresolved.
    """
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo['.'].hex()

    repo.ui.status(_("aborting the merge, updating back to"
                     " %s\n") % node[:12])
    # labels are deliberately not passed: this is a plain forced update
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
987
986
def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    remote = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, remote, branches, opts.get('rev'))

    if revs:
        revs = [remote.lookup(rev) for rev in revs]
    # getremotechanges may wrap the peer in a bundle repo and hands back
    # a cleanup callable that must always run
    remote, chlist, cleanup = bundlerepo.getremotechanges(
        ui, repo, remote, revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, remote, opts,
                                                  buffered=buffered)
        displaychlist(remote, chlist, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1018
1017
1019 def incoming(ui, repo, source, opts):
1018 def incoming(ui, repo, source, opts):
1020 def subreporecurse():
1019 def subreporecurse():
1021 ret = 1
1020 ret = 1
1022 if opts.get('subrepos'):
1021 if opts.get('subrepos'):
1023 ctx = repo[None]
1022 ctx = repo[None]
1024 for subpath in sorted(ctx.substate):
1023 for subpath in sorted(ctx.substate):
1025 sub = ctx.sub(subpath)
1024 sub = ctx.sub(subpath)
1026 ret = min(ret, sub.incoming(ui, source, opts))
1025 ret = min(ret, sub.incoming(ui, source, opts))
1027 return ret
1026 return ret
1028
1027
1029 def display(other, chlist, displayer):
1028 def display(other, chlist, displayer):
1030 limit = logcmdutil.getlimit(opts)
1029 limit = logcmdutil.getlimit(opts)
1031 if opts.get('newest_first'):
1030 if opts.get('newest_first'):
1032 chlist.reverse()
1031 chlist.reverse()
1033 count = 0
1032 count = 0
1034 for n in chlist:
1033 for n in chlist:
1035 if limit is not None and count >= limit:
1034 if limit is not None and count >= limit:
1036 break
1035 break
1037 parents = [p for p in other.changelog.parents(n) if p != nullid]
1036 parents = [p for p in other.changelog.parents(n) if p != nullid]
1038 if opts.get('no_merges') and len(parents) == 2:
1037 if opts.get('no_merges') and len(parents) == 2:
1039 continue
1038 continue
1040 count += 1
1039 count += 1
1041 displayer.show(other[n])
1040 displayer.show(other[n])
1042 return _incoming(display, subreporecurse, ui, repo, source, opts)
1041 return _incoming(display, subreporecurse, ui, repo, source, opts)
1043
1042
1044 def _outgoing(ui, repo, dest, opts):
1043 def _outgoing(ui, repo, dest, opts):
1045 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1044 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1046 if not path:
1045 if not path:
1047 raise error.Abort(_('default repository not configured!'),
1046 raise error.Abort(_('default repository not configured!'),
1048 hint=_("see 'hg help config.paths'"))
1047 hint=_("see 'hg help config.paths'"))
1049 dest = path.pushloc or path.loc
1048 dest = path.pushloc or path.loc
1050 branches = path.branch, opts.get('branch') or []
1049 branches = path.branch, opts.get('branch') or []
1051
1050
1052 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1051 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1053 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1052 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1054 if revs:
1053 if revs:
1055 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1054 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1056
1055
1057 other = peer(repo, opts, dest)
1056 other = peer(repo, opts, dest)
1058 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1057 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1059 force=opts.get('force'))
1058 force=opts.get('force'))
1060 o = outgoing.missing
1059 o = outgoing.missing
1061 if not o:
1060 if not o:
1062 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1061 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1063 return o, other
1062 return o, other
1064
1063
1065 def outgoing(ui, repo, dest, opts):
1064 def outgoing(ui, repo, dest, opts):
1066 def recurse():
1065 def recurse():
1067 ret = 1
1066 ret = 1
1068 if opts.get('subrepos'):
1067 if opts.get('subrepos'):
1069 ctx = repo[None]
1068 ctx = repo[None]
1070 for subpath in sorted(ctx.substate):
1069 for subpath in sorted(ctx.substate):
1071 sub = ctx.sub(subpath)
1070 sub = ctx.sub(subpath)
1072 ret = min(ret, sub.outgoing(ui, dest, opts))
1071 ret = min(ret, sub.outgoing(ui, dest, opts))
1073 return ret
1072 return ret
1074
1073
1075 limit = logcmdutil.getlimit(opts)
1074 limit = logcmdutil.getlimit(opts)
1076 o, other = _outgoing(ui, repo, dest, opts)
1075 o, other = _outgoing(ui, repo, dest, opts)
1077 if not o:
1076 if not o:
1078 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1077 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1079 return recurse()
1078 return recurse()
1080
1079
1081 if opts.get('newest_first'):
1080 if opts.get('newest_first'):
1082 o.reverse()
1081 o.reverse()
1083 ui.pager('outgoing')
1082 ui.pager('outgoing')
1084 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1083 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1085 count = 0
1084 count = 0
1086 for n in o:
1085 for n in o:
1087 if limit is not None and count >= limit:
1086 if limit is not None and count >= limit:
1088 break
1087 break
1089 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1088 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1090 if opts.get('no_merges') and len(parents) == 2:
1089 if opts.get('no_merges') and len(parents) == 2:
1091 continue
1090 continue
1092 count += 1
1091 count += 1
1093 displayer.show(repo[n])
1092 displayer.show(repo[n])
1094 displayer.close()
1093 displayer.close()
1095 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1094 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1096 recurse()
1095 recurse()
1097 return 0 # exit code is zero since we found outgoing changes
1096 return 0 # exit code is zero since we found outgoing changes
1098
1097
1099 def verify(repo, level=None):
1098 def verify(repo, level=None):
1100 """verify the consistency of a repository"""
1099 """verify the consistency of a repository"""
1101 ret = verifymod.verify(repo, level=level)
1100 ret = verifymod.verify(repo, level=level)
1102
1101
1103 # Broken subrepo references in hidden csets don't seem worth worrying about,
1102 # Broken subrepo references in hidden csets don't seem worth worrying about,
1104 # since they can't be pushed/pulled, and --hidden can be used if they are a
1103 # since they can't be pushed/pulled, and --hidden can be used if they are a
1105 # concern.
1104 # concern.
1106
1105
1107 # pathto() is needed for -R case
1106 # pathto() is needed for -R case
1108 revs = repo.revs("filelog(%s)",
1107 revs = repo.revs("filelog(%s)",
1109 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1108 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1110
1109
1111 if revs:
1110 if revs:
1112 repo.ui.status(_('checking subrepo links\n'))
1111 repo.ui.status(_('checking subrepo links\n'))
1113 for rev in revs:
1112 for rev in revs:
1114 ctx = repo[rev]
1113 ctx = repo[rev]
1115 try:
1114 try:
1116 for subpath in ctx.substate:
1115 for subpath in ctx.substate:
1117 try:
1116 try:
1118 ret = (ctx.sub(subpath, allowcreate=False).verify()
1117 ret = (ctx.sub(subpath, allowcreate=False).verify()
1119 or ret)
1118 or ret)
1120 except error.RepoError as e:
1119 except error.RepoError as e:
1121 repo.ui.warn(('%d: %s\n') % (rev, e))
1120 repo.ui.warn(('%d: %s\n') % (rev, e))
1122 except Exception:
1121 except Exception:
1123 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1122 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1124 node.short(ctx.node()))
1123 node.short(ctx.node()))
1125
1124
1126 return ret
1125 return ret
1127
1126
1128 def remoteui(src, opts):
1127 def remoteui(src, opts):
1129 'build a remote ui from ui or repo and opts'
1128 'build a remote ui from ui or repo and opts'
1130 if util.safehasattr(src, 'baseui'): # looks like a repository
1129 if util.safehasattr(src, 'baseui'): # looks like a repository
1131 dst = src.baseui.copy() # drop repo-specific config
1130 dst = src.baseui.copy() # drop repo-specific config
1132 src = src.ui # copy target options from repo
1131 src = src.ui # copy target options from repo
1133 else: # assume it's a global ui object
1132 else: # assume it's a global ui object
1134 dst = src.copy() # keep all global options
1133 dst = src.copy() # keep all global options
1135
1134
1136 # copy ssh-specific options
1135 # copy ssh-specific options
1137 for o in 'ssh', 'remotecmd':
1136 for o in 'ssh', 'remotecmd':
1138 v = opts.get(o) or src.config('ui', o)
1137 v = opts.get(o) or src.config('ui', o)
1139 if v:
1138 if v:
1140 dst.setconfig("ui", o, v, 'copied')
1139 dst.setconfig("ui", o, v, 'copied')
1141
1140
1142 # copy bundle-specific options
1141 # copy bundle-specific options
1143 r = src.config('bundle', 'mainreporoot')
1142 r = src.config('bundle', 'mainreporoot')
1144 if r:
1143 if r:
1145 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1144 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1146
1145
1147 # copy selected local settings to the remote ui
1146 # copy selected local settings to the remote ui
1148 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1147 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1149 for key, val in src.configitems(sect):
1148 for key, val in src.configitems(sect):
1150 dst.setconfig(sect, key, val, 'copied')
1149 dst.setconfig(sect, key, val, 'copied')
1151 v = src.config('web', 'cacerts')
1150 v = src.config('web', 'cacerts')
1152 if v:
1151 if v:
1153 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1152 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1154
1153
1155 return dst
1154 return dst
1156
1155
1157 # Files of interest
1156 # Files of interest
1158 # Used to check if the repository has changed looking at mtime and size of
1157 # Used to check if the repository has changed looking at mtime and size of
1159 # these files.
1158 # these files.
1160 foi = [('spath', '00changelog.i'),
1159 foi = [('spath', '00changelog.i'),
1161 ('spath', 'phaseroots'), # ! phase can change content at the same size
1160 ('spath', 'phaseroots'), # ! phase can change content at the same size
1162 ('spath', 'obsstore'),
1161 ('spath', 'obsstore'),
1163 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1162 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1164 ]
1163 ]
1165
1164
1166 class cachedlocalrepo(object):
1165 class cachedlocalrepo(object):
1167 """Holds a localrepository that can be cached and reused."""
1166 """Holds a localrepository that can be cached and reused."""
1168
1167
1169 def __init__(self, repo):
1168 def __init__(self, repo):
1170 """Create a new cached repo from an existing repo.
1169 """Create a new cached repo from an existing repo.
1171
1170
1172 We assume the passed in repo was recently created. If the
1171 We assume the passed in repo was recently created. If the
1173 repo has changed between when it was created and when it was
1172 repo has changed between when it was created and when it was
1174 turned into a cache, it may not refresh properly.
1173 turned into a cache, it may not refresh properly.
1175 """
1174 """
1176 assert isinstance(repo, localrepo.localrepository)
1175 assert isinstance(repo, localrepo.localrepository)
1177 self._repo = repo
1176 self._repo = repo
1178 self._state, self.mtime = self._repostate()
1177 self._state, self.mtime = self._repostate()
1179 self._filtername = repo.filtername
1178 self._filtername = repo.filtername
1180
1179
1181 def fetch(self):
1180 def fetch(self):
1182 """Refresh (if necessary) and return a repository.
1181 """Refresh (if necessary) and return a repository.
1183
1182
1184 If the cached instance is out of date, it will be recreated
1183 If the cached instance is out of date, it will be recreated
1185 automatically and returned.
1184 automatically and returned.
1186
1185
1187 Returns a tuple of the repo and a boolean indicating whether a new
1186 Returns a tuple of the repo and a boolean indicating whether a new
1188 repo instance was created.
1187 repo instance was created.
1189 """
1188 """
1190 # We compare the mtimes and sizes of some well-known files to
1189 # We compare the mtimes and sizes of some well-known files to
1191 # determine if the repo changed. This is not precise, as mtimes
1190 # determine if the repo changed. This is not precise, as mtimes
1192 # are susceptible to clock skew and imprecise filesystems and
1191 # are susceptible to clock skew and imprecise filesystems and
1193 # file content can change while maintaining the same size.
1192 # file content can change while maintaining the same size.
1194
1193
1195 state, mtime = self._repostate()
1194 state, mtime = self._repostate()
1196 if state == self._state:
1195 if state == self._state:
1197 return self._repo, False
1196 return self._repo, False
1198
1197
1199 repo = repository(self._repo.baseui, self._repo.url())
1198 repo = repository(self._repo.baseui, self._repo.url())
1200 if self._filtername:
1199 if self._filtername:
1201 self._repo = repo.filtered(self._filtername)
1200 self._repo = repo.filtered(self._filtername)
1202 else:
1201 else:
1203 self._repo = repo.unfiltered()
1202 self._repo = repo.unfiltered()
1204 self._state = state
1203 self._state = state
1205 self.mtime = mtime
1204 self.mtime = mtime
1206
1205
1207 return self._repo, True
1206 return self._repo, True
1208
1207
1209 def _repostate(self):
1208 def _repostate(self):
1210 state = []
1209 state = []
1211 maxmtime = -1
1210 maxmtime = -1
1212 for attr, fname in foi:
1211 for attr, fname in foi:
1213 prefix = getattr(self._repo, attr)
1212 prefix = getattr(self._repo, attr)
1214 p = os.path.join(prefix, fname)
1213 p = os.path.join(prefix, fname)
1215 try:
1214 try:
1216 st = os.stat(p)
1215 st = os.stat(p)
1217 except OSError:
1216 except OSError:
1218 st = os.stat(prefix)
1217 st = os.stat(prefix)
1219 state.append((st[stat.ST_MTIME], st.st_size))
1218 state.append((st[stat.ST_MTIME], st.st_size))
1220 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1219 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1221
1220
1222 return tuple(state), maxmtime
1221 return tuple(state), maxmtime
1223
1222
1224 def copy(self):
1223 def copy(self):
1225 """Obtain a copy of this class instance.
1224 """Obtain a copy of this class instance.
1226
1225
1227 A new localrepository instance is obtained. The new instance should be
1226 A new localrepository instance is obtained. The new instance should be
1228 completely independent of the original.
1227 completely independent of the original.
1229 """
1228 """
1230 repo = repository(self._repo.baseui, self._repo.origroot)
1229 repo = repository(self._repo.baseui, self._repo.origroot)
1231 if self._filtername:
1230 if self._filtername:
1232 repo = repo.filtered(self._filtername)
1231 repo = repo.filtered(self._filtername)
1233 else:
1232 else:
1234 repo = repo.unfiltered()
1233 repo = repo.unfiltered()
1235 c = cachedlocalrepo(repo)
1234 c = cachedlocalrepo(repo)
1236 c._state = self._state
1235 c._state = self._state
1237 c.mtime = self.mtime
1236 c.mtime = self.mtime
1238 return c
1237 return c
General Comments 0
You need to be logged in to leave comments. Login now