hg: extract post share update logic into own function...
Gregory Szorc
r28201:60adda1a default
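This change moves the working-directory update that share() used to perform inline into a new helper, _postshareupdate(), which share() now calls after postshare(). A minimal sketch of the resulting call sequence inside share() (an illustrative summary of the diff below, not additional patch content):

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks)      # copy paths/bookmarks config
    _postshareupdate(r, update, checkout=checkout)  # optionally update the working directory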
mercurial/hg.py
@@ -1,927 +1,935 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    merge as mergemod,
    node,
    phases,
    repoview,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
)

release = lock.release

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return str(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, str):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True, bookmarks=True):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks)
-
-    if update:
-        r.ui.status(_("updating working directory\n"))
-        if update is not True:
-            checkout = update
-        for test in (checkout, 'default', 'tip'):
-            if test is None:
-                continue
-            try:
-                uprev = r.lookup(test)
-                break
-            except error.RepoLookupError:
-                continue
-        _update(r, uprev)
-
+    _postshareupdate(r, update, checkout=checkout)
+
def postshare(sourcerepo, destrepo, bookmarks=True):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = sourcerepo.ui.config('paths', 'default')
    if default:
        fp = destrepo.vfs("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if bookmarks:
        fp = destrepo.vfs('shared', 'w')
        fp.write('bookmarks\n')
        fp.close()

+def _postshareupdate(repo, update, checkout=None):
+    """Maybe perform a working directory update after a shared repo is created.
+
+    ``update`` can be a boolean or a revision to update to.
+    """
+    if not update:
+        return
+
+    repo.ui.status(_("updating working directory\n"))
+    if update is not True:
+        checkout = update
+    for test in (checkout, 'default', 'tip'):
+        if test is None:
+            continue
+        try:
+            uprev = repo.lookup(test)
+            break
+        except error.RepoLookupError:
+            continue
+    _update(repo, uprev)
+
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    basename = os.path.basename(sharepath)

    if os.path.exists(sharepath):
        ui.status(_('(sharing from existing pooled repository %s)\n') %
                  basename)
    else:
        ui.status(_('(sharing from new pooled repository %s)\n') % basename)
        # Always use pull mode because hardlinks in share mode don't work well.
        # Never update because working copies aren't necessary in share mode.
        clone(ui, peeropts, source, dest=sharepath, pull=True,
              rev=rev, update=False, stream=stream)

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=update, bookmarks=False)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    return srcpeer, peer(ui, peeropts, dest)

def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(sharepool, util.sha1(source).hexdigest())
        else:
            raise error.Abort('unknown share naming mode: %s' % sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branch caches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
                try:
                    local.ui.setconfig(
                        'ui', 'quietbookmarkmove', True, 'clone')
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
                finally:
                    local.ui.restoreconfig(quiet)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'])

def update(repo, node, quietempty=False):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

def merge(repo, node, force=None, remind=True, mergeforce=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v == '!':
        dst.setconfig('web', 'cacerts', v, 'copied')
    elif v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
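
For context, a minimal sketch of how this module's share flow might be driven from Python. This is illustrative only: it assumes a Mercurial of this vintage on PYTHONPATH and an existing local repository at ./src, and the destination name 'src-share' is made up for the example.

    from mercurial import hg, ui as uimod

    myui = uimod.ui()
    # share() writes the 'requires' and 'sharedpath' files, postshare() copies
    # the default path and bookmark settings, and _postshareupdate() (added in
    # this change) performs the optional working-directory update.
    hg.share(myui, 'src', dest='src-share', update=True, bookmarks=True)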