##// END OF EJS Templates
hg: have `updatetotally` more thoroughly check updatecheck argument (API)...
Augie Fackler -
r43241:ee1ef76d default
parent child Browse files
Show More
@@ -1,1245 +1,1251 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 nullid,
19 nullid,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 bookmarks,
23 bookmarks,
24 bundlerepo,
24 bundlerepo,
25 cacheutil,
25 cacheutil,
26 cmdutil,
26 cmdutil,
27 destutil,
27 destutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 extensions,
31 extensions,
32 httppeer,
32 httppeer,
33 localrepo,
33 localrepo,
34 lock,
34 lock,
35 logcmdutil,
35 logcmdutil,
36 logexchange,
36 logexchange,
37 merge as mergemod,
37 merge as mergemod,
38 narrowspec,
38 narrowspec,
39 node,
39 node,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 scmutil,
42 scmutil,
43 sshpeer,
43 sshpeer,
44 statichttprepo,
44 statichttprepo,
45 ui as uimod,
45 ui as uimod,
46 unionrepo,
46 unionrepo,
47 url,
47 url,
48 util,
48 util,
49 verify as verifymod,
49 verify as verifymod,
50 vfs as vfsmod,
50 vfs as vfsmod,
51 )
51 )
52
52
53 from .interfaces import (
53 from .interfaces import (
54 repository as repositorymod,
54 repository as repositorymod,
55 )
55 )
56
56
# convenience alias: hg.release(*locks) forwards to lock.release
release = lock.release

# shared features
# name of the single shared feature defined in this module; requested
# through the 'shareditems' createopt (see share())
sharedbookmarks = 'bookmarks'
61
61
def _local(path):
    """Return the repository module handling local *path*.

    The path is expanded (user/env vars, url-to-local translation) before
    being examined.  A path naming an existing plain file is assumed to be
    a bundle and is handled by ``bundlerepo``; anything else (including a
    not-yet-existing directory) is handled by ``localrepo``.

    Raises ``error.Abort`` for paths that cannot be examined at all (e.g.
    embedded NUL bytes).
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(_('invalid path %s: %s') % (
            path, pycompat.bytestr(e)))

    # conditional expression instead of the dated `cond and a or b` idiom
    # (equivalent here because modules are always truthy, but clearer)
    return bundlerepo if isfile else localrepo
73
73
def addbranchrevs(lrepo, other, branches, revs):
    """Translate branch names from a url fragment into revisions.

    ``lrepo`` is the local repository (only used to resolve the '.'
    branch from its dirstate); ``other`` is the repo/peer the branches
    are resolved against; ``branches`` is the ``(hashbranch, branches)``
    pair produced by ``parseurl``; ``revs`` is the caller-supplied
    revision list (may be None/empty).

    Returns a ``(revs, checkout)`` pair: the combined revision list and
    the suggested revision to check out (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass the caller's revisions through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # old server without branchmap support: we cannot resolve branch
        # names remotely, so treat the fragment as a plain revision
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # resolve one branch name against the remote branchmap, extending
        # ``revs`` with its heads; returns False if the branch is unknown
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            # heads are appended most-recent first
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a known branch name; assume it is a revision identifier
            revs.append(hashbranch)
    return revs, revs[0]
116
116
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    fragment = u.fragment
    if fragment:
        # the fragment is the branch name; strip it from the url proper
        u.fragment = None
    return bytes(u), (fragment or None, branches or [])
126
126
# map of url scheme to the module (or factory callable) implementing that
# kind of repository; 'file' dispatches through _local to distinguish
# bundle files from local repositories
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
136
136
def _peerlookup(path):
    """Return the object from ``schemes`` that handles ``path``."""
    parsed = util.url(path)
    handler = schemes.get(parsed.scheme or 'file') or schemes['file']
    try:
        # entries such as _local are factories and must be called
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, 'instance'):
            return handler
        raise
149
149
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    handler = _peerlookup(repo)
    try:
        check = handler.islocal
    except AttributeError:
        # handlers without an islocal() are remote by definition
        return False
    return check(repo)
158
158
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), 'rb')
166
166
# a list of (ui, repo) functions called for wire peer initialization
# (invoked by _peerorrepo on every non-local peer before it is returned;
# empty by default — presumably populated by extensions)
wirepeersetupfuncs = []
169
169
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path

    Instantiates the repo/peer via the scheme handler, then runs the
    caller-supplied ``presetupfuncs`` followed by every loaded
    extension's ``reposetup`` hook.  For non-local objects the module's
    ``wirepeersetupfuncs`` are run as well.  Timings of the setup hooks
    are logged to the 'extension' log channel.
    """
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # the instance may carry its own (repo-level) ui; prefer it from here on
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b' > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
193
193
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                      intents=intents, createopts=createopts)
    repo = obj.local()
    if repo:
        # expose only the 'visible' view to callers
        return repo.filtered('visible')
    raise error.Abort(_("repository '%s' is not local") %
                      (path or obj.url()))
204
204
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(rui, path, create, intents=intents,
                      createopts=createopts)
    return obj.peer()
210
210
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # empty path (bare host or empty source) yields no sensible basename
    return os.path.basename(os.path.normpath(path)) if path else ''
231
231
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The resolved source repo is cached on ``repo.srcrepo`` so repeated
    calls do not re-open it.
    """
    if repo.sharedpath == repo.path:
        # store path equals repo path: not a share
        return None

    # return the previously cached source repository, if any
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
249
249
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a local path/URL (bytes) or an existing repo/peer
    object.  ``dest`` defaults to the basename of ``source``.  ``update``
    may be a boolean or a revision to check out after the share is
    created.  ``bookmarks`` controls whether bookmarks are shared with
    the source; ``defaultpath`` overrides the [paths] default written to
    the new repo's hgrc; ``relative`` stores the share pointer as a
    relative path.  Returns the new repository object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve any #branch
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the repo is constructed with the share fully configured
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
285
285
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            # keep the old share pointer around instead of deleting it
            sharefile = repo.vfs.join('sharedpath')
            util.rename(sharefile, sharefile + '.old')

            # drop the share-related requirements and persist
            repo.requirements.discard('shared')
            repo.requirements.discard('relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # invalidate the old repo object so stale references fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
327
327
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        # point the new repo's default path at the share source
        hgrc = util.tonativeeol(('[paths]\n'
                                 'default = %s\n') % default)
        destrepo.vfs.write('hgrc', hgrc)
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
345
345
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        # a concrete revision was requested; it overrides checkout
        checkout = update
    candidates = [rev for rev in (checkout, 'default', 'tip')
                  if rev is not None]
    for candidate in candidates:
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
366
366
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    The returned lock (or None) guards the destination store; callers
    are responsible for releasing it.  On any failure the lock is
    released before re-raising.
    '''
    destlock = None
    try:
        # util.copyfiles decides whether hardlinking is possible and
        # reports its choice back through the return value
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo treats everything as public, so its
                # phaseroots need not be copied
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise
404
404
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        # resolve symbolic revisions to nodes up front via the remote
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
475
475
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccachepath = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(srccachepath):
        # nothing to copy
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccachepath, os.path.join(dstcachedir, fname))
486
486
487 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
487 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
488 update=True, stream=False, branch=None, shareopts=None,
488 update=True, stream=False, branch=None, shareopts=None,
489 storeincludepats=None, storeexcludepats=None, depth=None):
489 storeincludepats=None, storeexcludepats=None, depth=None):
490 """Make a copy of an existing repository.
490 """Make a copy of an existing repository.
491
491
492 Create a copy of an existing repository in a new directory. The
492 Create a copy of an existing repository in a new directory. The
493 source and destination are URLs, as passed to the repository
493 source and destination are URLs, as passed to the repository
494 function. Returns a pair of repository peers, the source and
494 function. Returns a pair of repository peers, the source and
495 newly created destination.
495 newly created destination.
496
496
497 The location of the source is added to the new repository's
497 The location of the source is added to the new repository's
498 .hg/hgrc file, as the default to be used for future pulls and
498 .hg/hgrc file, as the default to be used for future pulls and
499 pushes.
499 pushes.
500
500
501 If an exception is raised, the partly cloned/updated destination
501 If an exception is raised, the partly cloned/updated destination
502 repository will be deleted.
502 repository will be deleted.
503
503
504 Arguments:
504 Arguments:
505
505
506 source: repository object or URL
506 source: repository object or URL
507
507
508 dest: URL of destination repository to create (defaults to base
508 dest: URL of destination repository to create (defaults to base
509 name of source repository)
509 name of source repository)
510
510
511 pull: always pull from source repository, even in local case or if the
511 pull: always pull from source repository, even in local case or if the
512 server prefers streaming
512 server prefers streaming
513
513
514 stream: stream raw data uncompressed from repository (fast over
514 stream: stream raw data uncompressed from repository (fast over
515 LAN, slow over WAN)
515 LAN, slow over WAN)
516
516
517 revs: revision to clone up to (implies pull=True)
517 revs: revision to clone up to (implies pull=True)
518
518
519 update: update working directory after clone completes, if
519 update: update working directory after clone completes, if
520 destination is local repository (True means update to default rev,
520 destination is local repository (True means update to default rev,
521 anything else is treated as a revision)
521 anything else is treated as a revision)
522
522
523 branch: branches to clone
523 branch: branches to clone
524
524
525 shareopts: dict of options to control auto sharing behavior. The "pool" key
525 shareopts: dict of options to control auto sharing behavior. The "pool" key
526 activates auto sharing mode and defines the directory for stores. The
526 activates auto sharing mode and defines the directory for stores. The
527 "mode" key determines how to construct the directory name of the shared
527 "mode" key determines how to construct the directory name of the shared
528 repository. "identity" means the name is derived from the node of the first
528 repository. "identity" means the name is derived from the node of the first
529 changeset in the repository. "remote" means the name is derived from the
529 changeset in the repository. "remote" means the name is derived from the
530 remote's path/URL. Defaults to "identity."
530 remote's path/URL. Defaults to "identity."
531
531
532 storeincludepats and storeexcludepats: sets of file patterns to include and
532 storeincludepats and storeexcludepats: sets of file patterns to include and
533 exclude in the repository copy, respectively. If not defined, all files
533 exclude in the repository copy, respectively. If not defined, all files
534 will be included (a "full" clone). Otherwise a "narrow" clone containing
534 will be included (a "full" clone). Otherwise a "narrow" clone containing
535 only the requested files will be performed. If ``storeincludepats`` is not
535 only the requested files will be performed. If ``storeincludepats`` is not
536 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
536 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
537 ``path:.``. If both are empty sets, no files will be cloned.
537 ``path:.``. If both are empty sets, no files will be cloned.
538 """
538 """
539
539
540 if isinstance(source, bytes):
540 if isinstance(source, bytes):
541 origsource = ui.expandpath(source)
541 origsource = ui.expandpath(source)
542 source, branches = parseurl(origsource, branch)
542 source, branches = parseurl(origsource, branch)
543 srcpeer = peer(ui, peeropts, source)
543 srcpeer = peer(ui, peeropts, source)
544 else:
544 else:
545 srcpeer = source.peer() # in case we were called with a localrepo
545 srcpeer = source.peer() # in case we were called with a localrepo
546 branches = (None, branch or [])
546 branches = (None, branch or [])
547 origsource = source = srcpeer.url()
547 origsource = source = srcpeer.url()
548 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
548 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
549
549
550 if dest is None:
550 if dest is None:
551 dest = defaultdest(source)
551 dest = defaultdest(source)
552 if dest:
552 if dest:
553 ui.status(_("destination directory: %s\n") % dest)
553 ui.status(_("destination directory: %s\n") % dest)
554 else:
554 else:
555 dest = ui.expandpath(dest)
555 dest = ui.expandpath(dest)
556
556
557 dest = util.urllocalpath(dest)
557 dest = util.urllocalpath(dest)
558 source = util.urllocalpath(source)
558 source = util.urllocalpath(source)
559
559
560 if not dest:
560 if not dest:
561 raise error.Abort(_("empty destination path is not valid"))
561 raise error.Abort(_("empty destination path is not valid"))
562
562
563 destvfs = vfsmod.vfs(dest, expandpath=True)
563 destvfs = vfsmod.vfs(dest, expandpath=True)
564 if destvfs.lexists():
564 if destvfs.lexists():
565 if not destvfs.isdir():
565 if not destvfs.isdir():
566 raise error.Abort(_("destination '%s' already exists") % dest)
566 raise error.Abort(_("destination '%s' already exists") % dest)
567 elif destvfs.listdir():
567 elif destvfs.listdir():
568 raise error.Abort(_("destination '%s' is not empty") % dest)
568 raise error.Abort(_("destination '%s' is not empty") % dest)
569
569
570 createopts = {}
570 createopts = {}
571 narrow = False
571 narrow = False
572
572
573 if storeincludepats is not None:
573 if storeincludepats is not None:
574 narrowspec.validatepatterns(storeincludepats)
574 narrowspec.validatepatterns(storeincludepats)
575 narrow = True
575 narrow = True
576
576
577 if storeexcludepats is not None:
577 if storeexcludepats is not None:
578 narrowspec.validatepatterns(storeexcludepats)
578 narrowspec.validatepatterns(storeexcludepats)
579 narrow = True
579 narrow = True
580
580
581 if narrow:
581 if narrow:
582 # Include everything by default if only exclusion patterns defined.
582 # Include everything by default if only exclusion patterns defined.
583 if storeexcludepats and not storeincludepats:
583 if storeexcludepats and not storeincludepats:
584 storeincludepats = {'path:.'}
584 storeincludepats = {'path:.'}
585
585
586 createopts['narrowfiles'] = True
586 createopts['narrowfiles'] = True
587
587
588 if depth:
588 if depth:
589 createopts['shallowfilestore'] = True
589 createopts['shallowfilestore'] = True
590
590
591 if srcpeer.capable(b'lfs-serve'):
591 if srcpeer.capable(b'lfs-serve'):
592 # Repository creation honors the config if it disabled the extension, so
592 # Repository creation honors the config if it disabled the extension, so
593 # we can't just announce that lfs will be enabled. This check avoids
593 # we can't just announce that lfs will be enabled. This check avoids
594 # saying that lfs will be enabled, and then saying it's an unknown
594 # saying that lfs will be enabled, and then saying it's an unknown
595 # feature. The lfs creation option is set in either case so that a
595 # feature. The lfs creation option is set in either case so that a
596 # requirement is added. If the extension is explicitly disabled but the
596 # requirement is added. If the extension is explicitly disabled but the
597 # requirement is set, the clone aborts early, before transferring any
597 # requirement is set, the clone aborts early, before transferring any
598 # data.
598 # data.
599 createopts['lfs'] = True
599 createopts['lfs'] = True
600
600
601 if extensions.disabledext('lfs'):
601 if extensions.disabledext('lfs'):
602 ui.status(_('(remote is using large file support (lfs), but it is '
602 ui.status(_('(remote is using large file support (lfs), but it is '
603 'explicitly disabled in the local configuration)\n'))
603 'explicitly disabled in the local configuration)\n'))
604 else:
604 else:
605 ui.status(_('(remote is using large file support (lfs); lfs will '
605 ui.status(_('(remote is using large file support (lfs); lfs will '
606 'be enabled for this repository)\n'))
606 'be enabled for this repository)\n'))
607
607
608 shareopts = shareopts or {}
608 shareopts = shareopts or {}
609 sharepool = shareopts.get('pool')
609 sharepool = shareopts.get('pool')
610 sharenamemode = shareopts.get('mode')
610 sharenamemode = shareopts.get('mode')
611 if sharepool and islocal(dest):
611 if sharepool and islocal(dest):
612 sharepath = None
612 sharepath = None
613 if sharenamemode == 'identity':
613 if sharenamemode == 'identity':
614 # Resolve the name from the initial changeset in the remote
614 # Resolve the name from the initial changeset in the remote
615 # repository. This returns nullid when the remote is empty. It
615 # repository. This returns nullid when the remote is empty. It
616 # raises RepoLookupError if revision 0 is filtered or otherwise
616 # raises RepoLookupError if revision 0 is filtered or otherwise
617 # not available. If we fail to resolve, sharing is not enabled.
617 # not available. If we fail to resolve, sharing is not enabled.
618 try:
618 try:
619 with srcpeer.commandexecutor() as e:
619 with srcpeer.commandexecutor() as e:
620 rootnode = e.callcommand('lookup', {
620 rootnode = e.callcommand('lookup', {
621 'key': '0',
621 'key': '0',
622 }).result()
622 }).result()
623
623
624 if rootnode != node.nullid:
624 if rootnode != node.nullid:
625 sharepath = os.path.join(sharepool, node.hex(rootnode))
625 sharepath = os.path.join(sharepool, node.hex(rootnode))
626 else:
626 else:
627 ui.status(_('(not using pooled storage: '
627 ui.status(_('(not using pooled storage: '
628 'remote appears to be empty)\n'))
628 'remote appears to be empty)\n'))
629 except error.RepoLookupError:
629 except error.RepoLookupError:
630 ui.status(_('(not using pooled storage: '
630 ui.status(_('(not using pooled storage: '
631 'unable to resolve identity of remote)\n'))
631 'unable to resolve identity of remote)\n'))
632 elif sharenamemode == 'remote':
632 elif sharenamemode == 'remote':
633 sharepath = os.path.join(
633 sharepath = os.path.join(
634 sharepool, node.hex(hashlib.sha1(source).digest()))
634 sharepool, node.hex(hashlib.sha1(source).digest()))
635 else:
635 else:
636 raise error.Abort(_('unknown share naming mode: %s') %
636 raise error.Abort(_('unknown share naming mode: %s') %
637 sharenamemode)
637 sharenamemode)
638
638
639 # TODO this is a somewhat arbitrary restriction.
639 # TODO this is a somewhat arbitrary restriction.
640 if narrow:
640 if narrow:
641 ui.status(_('(pooled storage not supported for narrow clones)\n'))
641 ui.status(_('(pooled storage not supported for narrow clones)\n'))
642 sharepath = None
642 sharepath = None
643
643
644 if sharepath:
644 if sharepath:
645 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
645 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
646 dest, pull=pull, rev=revs, update=update,
646 dest, pull=pull, rev=revs, update=update,
647 stream=stream)
647 stream=stream)
648
648
649 srclock = destlock = cleandir = None
649 srclock = destlock = cleandir = None
650 srcrepo = srcpeer.local()
650 srcrepo = srcpeer.local()
651 try:
651 try:
652 abspath = origsource
652 abspath = origsource
653 if islocal(origsource):
653 if islocal(origsource):
654 abspath = os.path.abspath(util.urllocalpath(origsource))
654 abspath = os.path.abspath(util.urllocalpath(origsource))
655
655
656 if islocal(dest):
656 if islocal(dest):
657 cleandir = dest
657 cleandir = dest
658
658
659 copy = False
659 copy = False
660 if (srcrepo and srcrepo.cancopy() and islocal(dest)
660 if (srcrepo and srcrepo.cancopy() and islocal(dest)
661 and not phases.hassecret(srcrepo)):
661 and not phases.hassecret(srcrepo)):
662 copy = not pull and not revs
662 copy = not pull and not revs
663
663
664 # TODO this is a somewhat arbitrary restriction.
664 # TODO this is a somewhat arbitrary restriction.
665 if narrow:
665 if narrow:
666 copy = False
666 copy = False
667
667
668 if copy:
668 if copy:
669 try:
669 try:
670 # we use a lock here because if we race with commit, we
670 # we use a lock here because if we race with commit, we
671 # can end up with extra data in the cloned revlogs that's
671 # can end up with extra data in the cloned revlogs that's
672 # not pointed to by changesets, thus causing verify to
672 # not pointed to by changesets, thus causing verify to
673 # fail
673 # fail
674 srclock = srcrepo.lock(wait=False)
674 srclock = srcrepo.lock(wait=False)
675 except error.LockError:
675 except error.LockError:
676 copy = False
676 copy = False
677
677
678 if copy:
678 if copy:
679 srcrepo.hook('preoutgoing', throw=True, source='clone')
679 srcrepo.hook('preoutgoing', throw=True, source='clone')
680 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
680 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
681 if not os.path.exists(dest):
681 if not os.path.exists(dest):
682 util.makedirs(dest)
682 util.makedirs(dest)
683 else:
683 else:
684 # only clean up directories we create ourselves
684 # only clean up directories we create ourselves
685 cleandir = hgdir
685 cleandir = hgdir
686 try:
686 try:
687 destpath = hgdir
687 destpath = hgdir
688 util.makedir(destpath, notindexed=True)
688 util.makedir(destpath, notindexed=True)
689 except OSError as inst:
689 except OSError as inst:
690 if inst.errno == errno.EEXIST:
690 if inst.errno == errno.EEXIST:
691 cleandir = None
691 cleandir = None
692 raise error.Abort(_("destination '%s' already exists")
692 raise error.Abort(_("destination '%s' already exists")
693 % dest)
693 % dest)
694 raise
694 raise
695
695
696 destlock = copystore(ui, srcrepo, destpath)
696 destlock = copystore(ui, srcrepo, destpath)
697 # copy bookmarks over
697 # copy bookmarks over
698 srcbookmarks = srcrepo.vfs.join('bookmarks')
698 srcbookmarks = srcrepo.vfs.join('bookmarks')
699 dstbookmarks = os.path.join(destpath, 'bookmarks')
699 dstbookmarks = os.path.join(destpath, 'bookmarks')
700 if os.path.exists(srcbookmarks):
700 if os.path.exists(srcbookmarks):
701 util.copyfile(srcbookmarks, dstbookmarks)
701 util.copyfile(srcbookmarks, dstbookmarks)
702
702
703 dstcachedir = os.path.join(destpath, 'cache')
703 dstcachedir = os.path.join(destpath, 'cache')
704 for cache in cacheutil.cachetocopy(srcrepo):
704 for cache in cacheutil.cachetocopy(srcrepo):
705 _copycache(srcrepo, dstcachedir, cache)
705 _copycache(srcrepo, dstcachedir, cache)
706
706
707 # we need to re-init the repo after manually copying the data
707 # we need to re-init the repo after manually copying the data
708 # into it
708 # into it
709 destpeer = peer(srcrepo, peeropts, dest)
709 destpeer = peer(srcrepo, peeropts, dest)
710 srcrepo.hook('outgoing', source='clone',
710 srcrepo.hook('outgoing', source='clone',
711 node=node.hex(node.nullid))
711 node=node.hex(node.nullid))
712 else:
712 else:
713 try:
713 try:
714 # only pass ui when no srcrepo
714 # only pass ui when no srcrepo
715 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
715 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
716 createopts=createopts)
716 createopts=createopts)
717 except OSError as inst:
717 except OSError as inst:
718 if inst.errno == errno.EEXIST:
718 if inst.errno == errno.EEXIST:
719 cleandir = None
719 cleandir = None
720 raise error.Abort(_("destination '%s' already exists")
720 raise error.Abort(_("destination '%s' already exists")
721 % dest)
721 % dest)
722 raise
722 raise
723
723
724 if revs:
724 if revs:
725 if not srcpeer.capable('lookup'):
725 if not srcpeer.capable('lookup'):
726 raise error.Abort(_("src repository does not support "
726 raise error.Abort(_("src repository does not support "
727 "revision lookup and so doesn't "
727 "revision lookup and so doesn't "
728 "support clone by revision"))
728 "support clone by revision"))
729
729
730 # TODO this is batchable.
730 # TODO this is batchable.
731 remoterevs = []
731 remoterevs = []
732 for rev in revs:
732 for rev in revs:
733 with srcpeer.commandexecutor() as e:
733 with srcpeer.commandexecutor() as e:
734 remoterevs.append(e.callcommand('lookup', {
734 remoterevs.append(e.callcommand('lookup', {
735 'key': rev,
735 'key': rev,
736 }).result())
736 }).result())
737 revs = remoterevs
737 revs = remoterevs
738
738
739 checkout = revs[0]
739 checkout = revs[0]
740 else:
740 else:
741 revs = None
741 revs = None
742 local = destpeer.local()
742 local = destpeer.local()
743 if local:
743 if local:
744 if narrow:
744 if narrow:
745 with local.wlock(), local.lock():
745 with local.wlock(), local.lock():
746 local.setnarrowpats(storeincludepats, storeexcludepats)
746 local.setnarrowpats(storeincludepats, storeexcludepats)
747 narrowspec.copytoworkingcopy(local)
747 narrowspec.copytoworkingcopy(local)
748
748
749 u = util.url(abspath)
749 u = util.url(abspath)
750 defaulturl = bytes(u)
750 defaulturl = bytes(u)
751 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
751 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
752 if not stream:
752 if not stream:
753 if pull:
753 if pull:
754 stream = False
754 stream = False
755 else:
755 else:
756 stream = None
756 stream = None
757 # internal config: ui.quietbookmarkmove
757 # internal config: ui.quietbookmarkmove
758 overrides = {('ui', 'quietbookmarkmove'): True}
758 overrides = {('ui', 'quietbookmarkmove'): True}
759 with local.ui.configoverride(overrides, 'clone'):
759 with local.ui.configoverride(overrides, 'clone'):
760 exchange.pull(local, srcpeer, revs,
760 exchange.pull(local, srcpeer, revs,
761 streamclonerequested=stream,
761 streamclonerequested=stream,
762 includepats=storeincludepats,
762 includepats=storeincludepats,
763 excludepats=storeexcludepats,
763 excludepats=storeexcludepats,
764 depth=depth)
764 depth=depth)
765 elif srcrepo:
765 elif srcrepo:
766 # TODO lift restriction once exchange.push() accepts narrow
766 # TODO lift restriction once exchange.push() accepts narrow
767 # push.
767 # push.
768 if narrow:
768 if narrow:
769 raise error.Abort(_('narrow clone not available for '
769 raise error.Abort(_('narrow clone not available for '
770 'remote destinations'))
770 'remote destinations'))
771
771
772 exchange.push(srcrepo, destpeer, revs=revs,
772 exchange.push(srcrepo, destpeer, revs=revs,
773 bookmarks=srcrepo._bookmarks.keys())
773 bookmarks=srcrepo._bookmarks.keys())
774 else:
774 else:
775 raise error.Abort(_("clone from remote to remote not supported")
775 raise error.Abort(_("clone from remote to remote not supported")
776 )
776 )
777
777
778 cleandir = None
778 cleandir = None
779
779
780 destrepo = destpeer.local()
780 destrepo = destpeer.local()
781 if destrepo:
781 if destrepo:
782 template = uimod.samplehgrcs['cloned']
782 template = uimod.samplehgrcs['cloned']
783 u = util.url(abspath)
783 u = util.url(abspath)
784 u.passwd = None
784 u.passwd = None
785 defaulturl = bytes(u)
785 defaulturl = bytes(u)
786 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
786 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
787 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
787 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
788
788
789 if ui.configbool('experimental', 'remotenames'):
789 if ui.configbool('experimental', 'remotenames'):
790 logexchange.pullremotenames(destrepo, srcpeer)
790 logexchange.pullremotenames(destrepo, srcpeer)
791
791
792 if update:
792 if update:
793 if update is not True:
793 if update is not True:
794 with srcpeer.commandexecutor() as e:
794 with srcpeer.commandexecutor() as e:
795 checkout = e.callcommand('lookup', {
795 checkout = e.callcommand('lookup', {
796 'key': update,
796 'key': update,
797 }).result()
797 }).result()
798
798
799 uprev = None
799 uprev = None
800 status = None
800 status = None
801 if checkout is not None:
801 if checkout is not None:
802 # Some extensions (at least hg-git and hg-subversion) have
802 # Some extensions (at least hg-git and hg-subversion) have
803 # a peer.lookup() implementation that returns a name instead
803 # a peer.lookup() implementation that returns a name instead
804 # of a nodeid. We work around it here until we've figured
804 # of a nodeid. We work around it here until we've figured
805 # out a better solution.
805 # out a better solution.
806 if len(checkout) == 20 and checkout in destrepo:
806 if len(checkout) == 20 and checkout in destrepo:
807 uprev = checkout
807 uprev = checkout
808 elif scmutil.isrevsymbol(destrepo, checkout):
808 elif scmutil.isrevsymbol(destrepo, checkout):
809 uprev = scmutil.revsymbol(destrepo, checkout).node()
809 uprev = scmutil.revsymbol(destrepo, checkout).node()
810 else:
810 else:
811 if update is not True:
811 if update is not True:
812 try:
812 try:
813 uprev = destrepo.lookup(update)
813 uprev = destrepo.lookup(update)
814 except error.RepoLookupError:
814 except error.RepoLookupError:
815 pass
815 pass
816 if uprev is None:
816 if uprev is None:
817 try:
817 try:
818 uprev = destrepo._bookmarks['@']
818 uprev = destrepo._bookmarks['@']
819 update = '@'
819 update = '@'
820 bn = destrepo[uprev].branch()
820 bn = destrepo[uprev].branch()
821 if bn == 'default':
821 if bn == 'default':
822 status = _("updating to bookmark @\n")
822 status = _("updating to bookmark @\n")
823 else:
823 else:
824 status = (_("updating to bookmark @ on branch %s\n")
824 status = (_("updating to bookmark @ on branch %s\n")
825 % bn)
825 % bn)
826 except KeyError:
826 except KeyError:
827 try:
827 try:
828 uprev = destrepo.branchtip('default')
828 uprev = destrepo.branchtip('default')
829 except error.RepoLookupError:
829 except error.RepoLookupError:
830 uprev = destrepo.lookup('tip')
830 uprev = destrepo.lookup('tip')
831 if not status:
831 if not status:
832 bn = destrepo[uprev].branch()
832 bn = destrepo[uprev].branch()
833 status = _("updating to branch %s\n") % bn
833 status = _("updating to branch %s\n") % bn
834 destrepo.ui.status(status)
834 destrepo.ui.status(status)
835 _update(destrepo, uprev)
835 _update(destrepo, uprev)
836 if update in destrepo._bookmarks:
836 if update in destrepo._bookmarks:
837 bookmarks.activate(destrepo, update)
837 bookmarks.activate(destrepo, update)
838 finally:
838 finally:
839 release(srclock, destlock)
839 release(srclock, destlock)
840 if cleandir is not None:
840 if cleandir is not None:
841 shutil.rmtree(cleandir, True)
841 shutil.rmtree(cleandir, True)
842 if srcpeer is not None:
842 if srcpeer is not None:
843 srcpeer.close()
843 srcpeer.close()
844 return srcpeer, destpeer
844 return srcpeer, destpeer
845
845
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge statistics on the ui.

    ``stats`` is the result object returned by mergemod.update(). When
    ``quietempty`` is true and nothing happened at all, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % counts)
853
853
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Changes are merged into the working directory unless ``overwrite``
    is set, in which case they are clobbered.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    labels = ['working copy', 'destination']
    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                           labels=labels, updatecheck=updatecheck)
863
863
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True if any file merges were left unresolved.
    """
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved

# naming conflict in clone()
_update = update
874
874
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Returns True if any file merges were left unresolved.
    """
    stats = updaterepo(repo, node, True)
    # a clean update abandons any interrupted graft
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean
885
885
# The complete set of legal values for the updatecheck argument/config
# option; consulted by updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # An invalid value passed in by a caller (as opposed to read from the
    # config) is a programming error, so fail loudly rather than silently
    # falling back.
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(r'Invalid updatecheck value %r (can accept %r)' % (
            updatecheck, _VALID_UPDATECHECKS))
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly a bookmark
            # to move) from the repository state
            checkout, movemarkfrom, brev = destutil.destupdate(repo,
                                                               clean=clean)
            warndest = True

        if clean:
            hasunresolved = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            hasunresolved = _update(repo, checkout, updatecheck=updatecheck)

        if not hasunresolved and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return hasunresolved
962
968
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        return abortmerge(repo.ui, repo)

    stats = mergemod.update(repo, node, branchmerge=True, force=force,
                            mergeforce=mergeforce, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind:
        # clean merge: nudge the user toward committing the result
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved
979
985
def abortmerge(ui, repo):
    """Abandon an in-progress merge and update back to the pre-merge state.

    Returns True if any file merges were left unresolved (not expected on
    a forced update).
    """
    ms = mergemod.mergestate.read(repo)
    # if there were conflicts, the local side was stored in the mergestate;
    # otherwise no state was stored and '.' is the destination
    node = ms.localctx.hex() if ms.active() else repo['.'].hex()

    repo.ui.status(_("aborting the merge, updating back to"
                     " %s\n") % node[:12])
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
994
1000
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns the result of subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may replace 'other' with a (temporary) bundle repo;
    # cleanupfn must run to dispose of it, hence the try/finally below
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1025
1031
def incoming(ui, repo, source, opts):
    """Show new changesets found in the given remote source.

    Returns 0 when incoming changes exist; otherwise 1, possibly lowered
    by recursing into subrepositories.
    """
    def recursesubrepos():
        # 1 means "no changes found"; min() keeps 0 if any subrepo
        # reported incoming changes
        result = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                result = min(result,
                             wctx.sub(subpath).incoming(ui, source, opts))
        return result

    def showchangesets(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            # a changeset with two non-null parents is a merge
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(showchangesets, recursesubrepos, ui, repo, source, opts)
1050
1056
def _outgoing(ui, repo, dest, opts):
    """Find changesets in repo that are missing from dest.

    Returns a (missing, otherpeer) pair: ``missing`` is the list of
    outgoing changeset nodes (empty when there is nothing to push) and
    ``otherpeer`` is the peer for the destination.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get('branch') or [])

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    otherpeer = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, otherpeer, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        # nothing to push; report (and mention excluded secret csets)
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, otherpeer
1071
1077
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the given destination.

    Returns 0 when outgoing changes exist; otherwise 1, possibly lowered
    by recursing into subrepositories.
    """
    def recursesubrepos():
        # 1 means "no changes found"; min() keeps 0 if any subrepo
        # reported outgoing changes
        result = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                result = min(result,
                             wctx.sub(subpath).outgoing(ui, dest, opts))
        return result

    limit = logcmdutil.getlimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recursesubrepos()

    if opts.get('newest_first'):
        missing.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    shown = 0
    for n in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        # a changeset with two non-null parents is a merge
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recursesubrepos()
    return 0 # exit code is zero since we found outgoing changes
1105
1111
def verify(repo, level=None):
    """verify the consistency of a repository

    ``level`` is forwarded unchanged to verifymod.verify(); ``None``
    selects the default verification depth.

    Returns the status from verifymod.verify(), combined (via ``or``)
    with the results of verifying any subrepos referenced from
    .hgsubstate.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        # a broken/missing subrepo is reported but must not
                        # abort verification of the remaining ones
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed; flag this revision and
                # keep going with the next one
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1134
1140
def remoteui(src, opts):
    """Build a ui for talking to a remote, from a ui or repo plus opts."""
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from its base ui so that
        # repo-specific configuration is dropped, then read the relevant
        # options from the repo's own ui below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # src is assumed to be a global ui object: keep all global options
        dst = src.copy()

    # ssh-specific options; command-line opts win over configuration
    for optname in ('ssh', 'remotecmd'):
        value = opts.get(optname) or src.config('ui', optname)
        if value:
            dst.setconfig("ui", optname, value, 'copied')

    # bundle-specific options
    mainreporoot = src.config('bundle', 'mainreporoot')
    if mainreporoot:
        dst.setconfig('bundle', 'mainreporoot', mainreporoot, 'copied')

    # selected local settings that must also apply to the remote ui
    for section in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1163
1169
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of a repo attribute holding a path prefix, filename);
# entries marked with "!" can change content without changing size, so the
# mtime/size check is only a heuristic for them.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]
1172
1178
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest plus the
        # newest mtime among them; fetch() compares against this to detect
        # repository changes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the same view (filter) the cached repo was using
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) describing the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list; ``maxmtime`` is the newest mtime seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file doesn't exist (yet); stat the containing
                # directory instead so a later creation is still noticed
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the cached state over so the copy does not immediately
        # consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now