hg: make cachedlocalrepo cache appropriate repoview object...
FUJIWARA Katsunori
r28119:91a827e7 default
@@ -1,918 +1,927 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    merge as mergemod,
    node,
    phases,
    repoview,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
)

release = lock.release

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return str(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, str):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True, bookmarks=True):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks)

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)

def postshare(sourcerepo, destrepo, bookmarks=True):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = sourcerepo.ui.config('paths', 'default')
    if default:
        fp = destrepo.vfs("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if bookmarks:
        fp = destrepo.vfs('shared', 'w')
        fp.write('bookmarks\n')
        fp.close()

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    basename = os.path.basename(sharepath)

    if os.path.exists(sharepath):
        ui.status(_('(sharing from existing pooled repository %s)\n') %
                  basename)
    else:
        ui.status(_('(sharing from new pooled repository %s)\n') % basename)
        # Always use pull mode because hardlinks in share mode don't work well.
        # Never update because working copies aren't necessary in share mode.
        clone(ui, peeropts, source, dest=sharepath, pull=True,
              rev=rev, update=False, stream=stream)

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=update, bookmarks=False)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    return srcpeer, peer(ui, peeropts, dest)

def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(sharepool, util.sha1(source).hexdigest())
        else:
            raise error.Abort('unknown share naming mode: %s' % sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branch caches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
                try:
                    local.ui.setconfig(
                        'ui', 'quietbookmarkmove', True, 'clone')
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
                finally:
                    local.ui.restoreconfig(quiet)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'])

def update(repo, node, quietempty=False):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

def merge(repo, node, force=None, remind=True, mergeforce=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v == '!':
        dst.setconfig('web', 'cacerts', v, 'copied')
    elif v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
+        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

-        self._repo = repository(self._repo.baseui, self._repo.url())
+        repo = repository(self._repo.baseui, self._repo.url())
+        if self._filtername:
+            self._repo = repo.filtered(self._filtername)
+        else:
+            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
+        if self._filtername:
+            repo = repo.filtered(self._filtername)
+        else:
+            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
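
What the change buys in practice: cachedlocalrepo now remembers the filter name of the repoview it was constructed with and re-applies it whenever fetch() rebuilds the repository or copy() duplicates the cache, instead of always falling back on the 'visible' view that repository() returns. The sketch below illustrates the intended behaviour; it is not part of the commit, and it assumes a local repository at the hypothetical path './repo' and a Mercurial of this vintage (Python 2 era).

# Sketch only: shows how the cached repoview filter survives a refresh.
# './repo' is a hypothetical local repository path.
from mercurial import hg, ui as uimod

u = uimod.ui()
base = hg.repository(u, 'repo')       # repository() returns the 'visible' view
served = base.filtered('served')      # e.g. the view hgweb actually serves
cached = hg.cachedlocalrepo(served)

# ... the repository changes on disk (new commit, strip, ...) ...

repo, created = cached.fetch()        # rebuilds only if the on-disk state changed
# Before this change the rebuilt repo silently reverted to the 'visible' view;
# with the cached filter name it keeps the original view:
print(repo.filtername)                # -> 'served'

If nothing changed on disk, fetch() returns the cached instance as-is, so the view is preserved either way; the diff above only affects the rebuild path in fetch() and the duplication path in copy().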