##// END OF EJS Templates
local-clone: extract the listing of caches to copy...
marmoute -
r32493:3c8a71a8 default
parent child Browse files
Show More
@@ -1,1055 +1,1060 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45
45
# convenience alias: release a sequence of locks, ignoring None entries
release = lock.release

# shared features
# name of the feature flag written to .hg/shared when bookmarks are shared
sharedbookmarks = 'bookmarks'
50
50
def _local(path):
    """Return the repo module to handle a local ``path``.

    A path that points at a plain file is treated as a bundle; anything
    else is assumed to be a local repository directory.
    """
    path = util.expandpath(util.urllocalpath(path))
    # conditional expression instead of the fragile ``X and A or B`` idiom,
    # which silently picks B whenever A is falsy
    return bundlerepo if os.path.isfile(path) else localrepo
54
54
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names into revisions to pull/clone.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    ``parseurl``. Returns ``(revs, checkout)`` where ``revs`` is the list
    of revisions/identifiers to request and ``checkout`` is the revision
    to update the working directory to (may be None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related was requested; pass revs through untouched
        x = revs or None
        if util.safehasattr(revs, 'first'):
            # revs may be a smartset-like object; use its own accessor
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # old servers: we cannot resolve branch names remotely, so the
        # fragment can only be treated as a plain revision identifier
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # append the heads of ``branch`` to revs; returns False when the
        # branch does not exist in the remote branchmap
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may be a branch name or a raw revision; fall
        # back to treating it as a revision when it is not a known branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
97
97
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    fragment = u.fragment or None
    if fragment is not None:
        # strip the fragment so the returned URL is the bare location
        u.fragment = None
    return bytes(u), (fragment, branches or [])
107
107
# URL scheme -> handler used by _peerlookup. Values are either modules
# exposing an ``instance`` factory or callables returning such a module
# (``_local`` picks bundlerepo vs localrepo based on the path).
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
117
117
def _peerlookup(path):
    """Return the module/object able to open a repo or peer at ``path``.

    Resolves the URL scheme through ``schemes`` (defaulting to 'file')
    and, when the handler is itself a callable like ``_local``, calls it
    with the path to obtain the concrete module.
    """
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
130
130
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    # a repository object answers for itself
    if not isinstance(repo, str):
        return repo.local()
    # a string path: ask the scheme handler; peers without an islocal
    # attribute (wire protocols) are by definition not local
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
139
139
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
147
147
# a list of (ui, repo) functions called for wire peer initialization;
# extensions append to this to customize remote peers (see _peerorrepo)
wirepeersetupfuncs = []
150
150
def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path

    ``presetupfuncs`` are (ui, obj) callables run before extensions'
    ``reposetup`` hooks. Wire peers additionally get the functions
    registered in ``wirepeersetupfuncs`` applied.
    """
    obj = _peerlookup(path).instance(ui, path, create)
    # the repo may carry its own (more configured) ui; prefer it
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
165
165
def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = obj.local()
    if repo:
        # callers always get the 'visible' filtered view
        return repo.filtered('visible')
    raise error.Abort(_("repository '%s' is not local")
                      % (path or obj.url()))
174
174
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    # build a ui configured for remote interaction, then wrap the repo
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remote_ui, path, create)
    return obj.peer()
179
179
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # normpath strips any trailing slash so basename yields the last
    # meaningful component; an empty path has no sensible default
    return os.path.basename(os.path.normpath(path)) if path else ''
200
200
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a URL/path string or a repo object. The new repo at
    ``dest`` stores only a pointer (``sharedpath``) to the source's store.
    ``relative`` records the shared path relative to the destination so
    the pair can be moved together.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo object: derive the URL back from it
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # start from the source's requirements (missing file means none)
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except IOError as e:
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    # open the new repo and finish configuration / working dir update
    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
258
258
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # record where future pulls/pushes should default to
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        hgrc = destrepo.vfs("hgrc", "w", text=True)
        hgrc.write("[paths]\n")
        hgrc.write("default = %s\n" % default)
        hgrc.close()

    with destrepo.wlock():
        if bookmarks:
            # mark bookmarks as shared with the source repository
            sharedfile = destrepo.vfs('shared', 'w')
            sharedfile.write(sharedbookmarks + '\n')
            sharedfile.close()
280
280
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        # a concrete revision was requested; it overrides ``checkout``
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    # NOTE: 'tip' is expected to always resolve, so uprev is presumably
    # always bound here — if every lookup failed this would NameError
    _update(repo, uprev)
301
301
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    Hardlinks files when possible, falling back to plain copies. The
    returned lock (may be None) protects the partially-written target;
    the caller is responsible for releasing it.
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        # progress callback shared across copyfiles calls; pos=None means
        # the topic is finished — remember it so we can close it below
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo's phaseroots would mark everything public;
            # skip it so the destination computes its own phases
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise
347
347
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like ``clone``.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
411
411
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccachefile = srcrepo.vfs.join('cache/%s' % fname)
    dstcachefile = os.path.join(dstcachedir, fname)
    if not os.path.exists(srccachefile):
        # nothing cached on the source side; nothing to do
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccachefile, dstcachefile)
422
422
def _cachetocopy(srcrepo):
    """return the list of cache file valuable to copy during a clone"""
    # In local clones we're copying all nodes, not just served ones, so
    # the branch cache of every filtered view is worth carrying over too.
    return ['branch2'] + ['branch2-%s' % name for name in repoview.filtertable]
430
423 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
431 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
424 update=True, stream=False, branch=None, shareopts=None):
432 update=True, stream=False, branch=None, shareopts=None):
425 """Make a copy of an existing repository.
433 """Make a copy of an existing repository.
426
434
427 Create a copy of an existing repository in a new directory. The
435 Create a copy of an existing repository in a new directory. The
428 source and destination are URLs, as passed to the repository
436 source and destination are URLs, as passed to the repository
429 function. Returns a pair of repository peers, the source and
437 function. Returns a pair of repository peers, the source and
430 newly created destination.
438 newly created destination.
431
439
432 The location of the source is added to the new repository's
440 The location of the source is added to the new repository's
433 .hg/hgrc file, as the default to be used for future pulls and
441 .hg/hgrc file, as the default to be used for future pulls and
434 pushes.
442 pushes.
435
443
436 If an exception is raised, the partly cloned/updated destination
444 If an exception is raised, the partly cloned/updated destination
437 repository will be deleted.
445 repository will be deleted.
438
446
439 Arguments:
447 Arguments:
440
448
441 source: repository object or URL
449 source: repository object or URL
442
450
443 dest: URL of destination repository to create (defaults to base
451 dest: URL of destination repository to create (defaults to base
444 name of source repository)
452 name of source repository)
445
453
446 pull: always pull from source repository, even in local case or if the
454 pull: always pull from source repository, even in local case or if the
447 server prefers streaming
455 server prefers streaming
448
456
449 stream: stream raw data uncompressed from repository (fast over
457 stream: stream raw data uncompressed from repository (fast over
450 LAN, slow over WAN)
458 LAN, slow over WAN)
451
459
452 rev: revision to clone up to (implies pull=True)
460 rev: revision to clone up to (implies pull=True)
453
461
454 update: update working directory after clone completes, if
462 update: update working directory after clone completes, if
455 destination is local repository (True means update to default rev,
463 destination is local repository (True means update to default rev,
456 anything else is treated as a revision)
464 anything else is treated as a revision)
457
465
458 branch: branches to clone
466 branch: branches to clone
459
467
460 shareopts: dict of options to control auto sharing behavior. The "pool" key
468 shareopts: dict of options to control auto sharing behavior. The "pool" key
461 activates auto sharing mode and defines the directory for stores. The
469 activates auto sharing mode and defines the directory for stores. The
462 "mode" key determines how to construct the directory name of the shared
470 "mode" key determines how to construct the directory name of the shared
463 repository. "identity" means the name is derived from the node of the first
471 repository. "identity" means the name is derived from the node of the first
464 changeset in the repository. "remote" means the name is derived from the
472 changeset in the repository. "remote" means the name is derived from the
465 remote's path/URL. Defaults to "identity."
473 remote's path/URL. Defaults to "identity."
466 """
474 """
467
475
468 if isinstance(source, str):
476 if isinstance(source, str):
469 origsource = ui.expandpath(source)
477 origsource = ui.expandpath(source)
470 source, branch = parseurl(origsource, branch)
478 source, branch = parseurl(origsource, branch)
471 srcpeer = peer(ui, peeropts, source)
479 srcpeer = peer(ui, peeropts, source)
472 else:
480 else:
473 srcpeer = source.peer() # in case we were called with a localrepo
481 srcpeer = source.peer() # in case we were called with a localrepo
474 branch = (None, branch or [])
482 branch = (None, branch or [])
475 origsource = source = srcpeer.url()
483 origsource = source = srcpeer.url()
476 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
484 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
477
485
478 if dest is None:
486 if dest is None:
479 dest = defaultdest(source)
487 dest = defaultdest(source)
480 if dest:
488 if dest:
481 ui.status(_("destination directory: %s\n") % dest)
489 ui.status(_("destination directory: %s\n") % dest)
482 else:
490 else:
483 dest = ui.expandpath(dest)
491 dest = ui.expandpath(dest)
484
492
485 dest = util.urllocalpath(dest)
493 dest = util.urllocalpath(dest)
486 source = util.urllocalpath(source)
494 source = util.urllocalpath(source)
487
495
488 if not dest:
496 if not dest:
489 raise error.Abort(_("empty destination path is not valid"))
497 raise error.Abort(_("empty destination path is not valid"))
490
498
491 destvfs = vfsmod.vfs(dest, expandpath=True)
499 destvfs = vfsmod.vfs(dest, expandpath=True)
492 if destvfs.lexists():
500 if destvfs.lexists():
493 if not destvfs.isdir():
501 if not destvfs.isdir():
494 raise error.Abort(_("destination '%s' already exists") % dest)
502 raise error.Abort(_("destination '%s' already exists") % dest)
495 elif destvfs.listdir():
503 elif destvfs.listdir():
496 raise error.Abort(_("destination '%s' is not empty") % dest)
504 raise error.Abort(_("destination '%s' is not empty") % dest)
497
505
498 shareopts = shareopts or {}
506 shareopts = shareopts or {}
499 sharepool = shareopts.get('pool')
507 sharepool = shareopts.get('pool')
500 sharenamemode = shareopts.get('mode')
508 sharenamemode = shareopts.get('mode')
501 if sharepool and islocal(dest):
509 if sharepool and islocal(dest):
502 sharepath = None
510 sharepath = None
503 if sharenamemode == 'identity':
511 if sharenamemode == 'identity':
504 # Resolve the name from the initial changeset in the remote
512 # Resolve the name from the initial changeset in the remote
505 # repository. This returns nullid when the remote is empty. It
513 # repository. This returns nullid when the remote is empty. It
506 # raises RepoLookupError if revision 0 is filtered or otherwise
514 # raises RepoLookupError if revision 0 is filtered or otherwise
507 # not available. If we fail to resolve, sharing is not enabled.
515 # not available. If we fail to resolve, sharing is not enabled.
508 try:
516 try:
509 rootnode = srcpeer.lookup('0')
517 rootnode = srcpeer.lookup('0')
510 if rootnode != node.nullid:
518 if rootnode != node.nullid:
511 sharepath = os.path.join(sharepool, node.hex(rootnode))
519 sharepath = os.path.join(sharepool, node.hex(rootnode))
512 else:
520 else:
513 ui.status(_('(not using pooled storage: '
521 ui.status(_('(not using pooled storage: '
514 'remote appears to be empty)\n'))
522 'remote appears to be empty)\n'))
515 except error.RepoLookupError:
523 except error.RepoLookupError:
516 ui.status(_('(not using pooled storage: '
524 ui.status(_('(not using pooled storage: '
517 'unable to resolve identity of remote)\n'))
525 'unable to resolve identity of remote)\n'))
518 elif sharenamemode == 'remote':
526 elif sharenamemode == 'remote':
519 sharepath = os.path.join(
527 sharepath = os.path.join(
520 sharepool, hashlib.sha1(source).hexdigest())
528 sharepool, hashlib.sha1(source).hexdigest())
521 else:
529 else:
522 raise error.Abort(_('unknown share naming mode: %s') %
530 raise error.Abort(_('unknown share naming mode: %s') %
523 sharenamemode)
531 sharenamemode)
524
532
525 if sharepath:
533 if sharepath:
526 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
534 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
527 dest, pull=pull, rev=rev, update=update,
535 dest, pull=pull, rev=rev, update=update,
528 stream=stream)
536 stream=stream)
529
537
530 srclock = destlock = cleandir = None
538 srclock = destlock = cleandir = None
531 srcrepo = srcpeer.local()
539 srcrepo = srcpeer.local()
532 try:
540 try:
533 abspath = origsource
541 abspath = origsource
534 if islocal(origsource):
542 if islocal(origsource):
535 abspath = os.path.abspath(util.urllocalpath(origsource))
543 abspath = os.path.abspath(util.urllocalpath(origsource))
536
544
537 if islocal(dest):
545 if islocal(dest):
538 cleandir = dest
546 cleandir = dest
539
547
540 copy = False
548 copy = False
541 if (srcrepo and srcrepo.cancopy() and islocal(dest)
549 if (srcrepo and srcrepo.cancopy() and islocal(dest)
542 and not phases.hassecret(srcrepo)):
550 and not phases.hassecret(srcrepo)):
543 copy = not pull and not rev
551 copy = not pull and not rev
544
552
545 if copy:
553 if copy:
546 try:
554 try:
547 # we use a lock here because if we race with commit, we
555 # we use a lock here because if we race with commit, we
548 # can end up with extra data in the cloned revlogs that's
556 # can end up with extra data in the cloned revlogs that's
549 # not pointed to by changesets, thus causing verify to
557 # not pointed to by changesets, thus causing verify to
550 # fail
558 # fail
551 srclock = srcrepo.lock(wait=False)
559 srclock = srcrepo.lock(wait=False)
552 except error.LockError:
560 except error.LockError:
553 copy = False
561 copy = False
554
562
555 if copy:
563 if copy:
556 srcrepo.hook('preoutgoing', throw=True, source='clone')
564 srcrepo.hook('preoutgoing', throw=True, source='clone')
557 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
565 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
558 if not os.path.exists(dest):
566 if not os.path.exists(dest):
559 os.mkdir(dest)
567 os.mkdir(dest)
560 else:
568 else:
561 # only clean up directories we create ourselves
569 # only clean up directories we create ourselves
562 cleandir = hgdir
570 cleandir = hgdir
563 try:
571 try:
564 destpath = hgdir
572 destpath = hgdir
565 util.makedir(destpath, notindexed=True)
573 util.makedir(destpath, notindexed=True)
566 except OSError as inst:
574 except OSError as inst:
567 if inst.errno == errno.EEXIST:
575 if inst.errno == errno.EEXIST:
568 cleandir = None
576 cleandir = None
569 raise error.Abort(_("destination '%s' already exists")
577 raise error.Abort(_("destination '%s' already exists")
570 % dest)
578 % dest)
571 raise
579 raise
572
580
573 destlock = copystore(ui, srcrepo, destpath)
581 destlock = copystore(ui, srcrepo, destpath)
574 # copy bookmarks over
582 # copy bookmarks over
575 srcbookmarks = srcrepo.vfs.join('bookmarks')
583 srcbookmarks = srcrepo.vfs.join('bookmarks')
576 dstbookmarks = os.path.join(destpath, 'bookmarks')
584 dstbookmarks = os.path.join(destpath, 'bookmarks')
577 if os.path.exists(srcbookmarks):
585 if os.path.exists(srcbookmarks):
578 util.copyfile(srcbookmarks, dstbookmarks)
586 util.copyfile(srcbookmarks, dstbookmarks)
579
587
580 dstcachedir = os.path.join(destpath, 'cache')
588 dstcachedir = os.path.join(destpath, 'cache')
581 # In local clones we're copying all nodes, not just served
589 for cache in _cachetocopy(srcrepo):
582 # ones. Therefore copy all branch caches over.
590 _copycache(srcrepo, dstcachedir, cache)
583 _copycache(srcrepo, dstcachedir, 'branch2')
584 for cachename in repoview.filtertable:
585 _copycache(srcrepo, dstcachedir, 'branch2-%s' % cachename)
586
591
587 # we need to re-init the repo after manually copying the data
592 # we need to re-init the repo after manually copying the data
588 # into it
593 # into it
589 destpeer = peer(srcrepo, peeropts, dest)
594 destpeer = peer(srcrepo, peeropts, dest)
590 srcrepo.hook('outgoing', source='clone',
595 srcrepo.hook('outgoing', source='clone',
591 node=node.hex(node.nullid))
596 node=node.hex(node.nullid))
592 else:
597 else:
593 try:
598 try:
594 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
599 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
595 # only pass ui when no srcrepo
600 # only pass ui when no srcrepo
596 except OSError as inst:
601 except OSError as inst:
597 if inst.errno == errno.EEXIST:
602 if inst.errno == errno.EEXIST:
598 cleandir = None
603 cleandir = None
599 raise error.Abort(_("destination '%s' already exists")
604 raise error.Abort(_("destination '%s' already exists")
600 % dest)
605 % dest)
601 raise
606 raise
602
607
603 revs = None
608 revs = None
604 if rev:
609 if rev:
605 if not srcpeer.capable('lookup'):
610 if not srcpeer.capable('lookup'):
606 raise error.Abort(_("src repository does not support "
611 raise error.Abort(_("src repository does not support "
607 "revision lookup and so doesn't "
612 "revision lookup and so doesn't "
608 "support clone by revision"))
613 "support clone by revision"))
609 revs = [srcpeer.lookup(r) for r in rev]
614 revs = [srcpeer.lookup(r) for r in rev]
610 checkout = revs[0]
615 checkout = revs[0]
611 local = destpeer.local()
616 local = destpeer.local()
612 if local:
617 if local:
613 if not stream:
618 if not stream:
614 if pull:
619 if pull:
615 stream = False
620 stream = False
616 else:
621 else:
617 stream = None
622 stream = None
618 # internal config: ui.quietbookmarkmove
623 # internal config: ui.quietbookmarkmove
619 overrides = {('ui', 'quietbookmarkmove'): True}
624 overrides = {('ui', 'quietbookmarkmove'): True}
620 with local.ui.configoverride(overrides, 'clone'):
625 with local.ui.configoverride(overrides, 'clone'):
621 exchange.pull(local, srcpeer, revs,
626 exchange.pull(local, srcpeer, revs,
622 streamclonerequested=stream)
627 streamclonerequested=stream)
623 elif srcrepo:
628 elif srcrepo:
624 exchange.push(srcrepo, destpeer, revs=revs,
629 exchange.push(srcrepo, destpeer, revs=revs,
625 bookmarks=srcrepo._bookmarks.keys())
630 bookmarks=srcrepo._bookmarks.keys())
626 else:
631 else:
627 raise error.Abort(_("clone from remote to remote not supported")
632 raise error.Abort(_("clone from remote to remote not supported")
628 )
633 )
629
634
630 cleandir = None
635 cleandir = None
631
636
632 destrepo = destpeer.local()
637 destrepo = destpeer.local()
633 if destrepo:
638 if destrepo:
634 template = uimod.samplehgrcs['cloned']
639 template = uimod.samplehgrcs['cloned']
635 fp = destrepo.vfs("hgrc", "w", text=True)
640 fp = destrepo.vfs("hgrc", "w", text=True)
636 u = util.url(abspath)
641 u = util.url(abspath)
637 u.passwd = None
642 u.passwd = None
638 defaulturl = str(u)
643 defaulturl = str(u)
639 fp.write(template % defaulturl)
644 fp.write(template % defaulturl)
640 fp.close()
645 fp.close()
641
646
642 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
647 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
643
648
644 if update:
649 if update:
645 if update is not True:
650 if update is not True:
646 checkout = srcpeer.lookup(update)
651 checkout = srcpeer.lookup(update)
647 uprev = None
652 uprev = None
648 status = None
653 status = None
649 if checkout is not None:
654 if checkout is not None:
650 try:
655 try:
651 uprev = destrepo.lookup(checkout)
656 uprev = destrepo.lookup(checkout)
652 except error.RepoLookupError:
657 except error.RepoLookupError:
653 if update is not True:
658 if update is not True:
654 try:
659 try:
655 uprev = destrepo.lookup(update)
660 uprev = destrepo.lookup(update)
656 except error.RepoLookupError:
661 except error.RepoLookupError:
657 pass
662 pass
658 if uprev is None:
663 if uprev is None:
659 try:
664 try:
660 uprev = destrepo._bookmarks['@']
665 uprev = destrepo._bookmarks['@']
661 update = '@'
666 update = '@'
662 bn = destrepo[uprev].branch()
667 bn = destrepo[uprev].branch()
663 if bn == 'default':
668 if bn == 'default':
664 status = _("updating to bookmark @\n")
669 status = _("updating to bookmark @\n")
665 else:
670 else:
666 status = (_("updating to bookmark @ on branch %s\n")
671 status = (_("updating to bookmark @ on branch %s\n")
667 % bn)
672 % bn)
668 except KeyError:
673 except KeyError:
669 try:
674 try:
670 uprev = destrepo.branchtip('default')
675 uprev = destrepo.branchtip('default')
671 except error.RepoLookupError:
676 except error.RepoLookupError:
672 uprev = destrepo.lookup('tip')
677 uprev = destrepo.lookup('tip')
673 if not status:
678 if not status:
674 bn = destrepo[uprev].branch()
679 bn = destrepo[uprev].branch()
675 status = _("updating to branch %s\n") % bn
680 status = _("updating to branch %s\n") % bn
676 destrepo.ui.status(status)
681 destrepo.ui.status(status)
677 _update(destrepo, uprev)
682 _update(destrepo, uprev)
678 if update in destrepo._bookmarks:
683 if update in destrepo._bookmarks:
679 bookmarks.activate(destrepo, update)
684 bookmarks.activate(destrepo, update)
680 finally:
685 finally:
681 release(srclock, destlock)
686 release(srclock, destlock)
682 if cleandir is not None:
687 if cleandir is not None:
683 shutil.rmtree(cleandir, True)
688 shutil.rmtree(cleandir, True)
684 if srcpeer is not None:
689 if srcpeer is not None:
685 srcpeer.close()
690 srcpeer.close()
686 return srcpeer, destpeer
691 return srcpeer, destpeer
687
692
688 def _showstats(repo, stats, quietempty=False):
693 def _showstats(repo, stats, quietempty=False):
689 if quietempty and not any(stats):
694 if quietempty and not any(stats):
690 return
695 return
691 repo.ui.status(_("%d files updated, %d files merged, "
696 repo.ui.status(_("%d files updated, %d files merged, "
692 "%d files removed, %d files unresolved\n") % stats)
697 "%d files removed, %d files unresolved\n") % stats)
693
698
694 def updaterepo(repo, node, overwrite, updatecheck=None):
699 def updaterepo(repo, node, overwrite, updatecheck=None):
695 """Update the working directory to node.
700 """Update the working directory to node.
696
701
697 When overwrite is set, changes are clobbered, merged else
702 When overwrite is set, changes are clobbered, merged else
698
703
699 returns stats (see pydoc mercurial.merge.applyupdates)"""
704 returns stats (see pydoc mercurial.merge.applyupdates)"""
700 return mergemod.update(repo, node, False, overwrite,
705 return mergemod.update(repo, node, False, overwrite,
701 labels=['working copy', 'destination'],
706 labels=['working copy', 'destination'],
702 updatecheck=updatecheck)
707 updatecheck=updatecheck)
703
708
704 def update(repo, node, quietempty=False, updatecheck=None):
709 def update(repo, node, quietempty=False, updatecheck=None):
705 """update the working directory to node"""
710 """update the working directory to node"""
706 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
711 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
707 _showstats(repo, stats, quietempty)
712 _showstats(repo, stats, quietempty)
708 if stats[3]:
713 if stats[3]:
709 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
714 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
710 return stats[3] > 0
715 return stats[3] > 0
711
716
712 # naming conflict in clone()
717 # naming conflict in clone()
713 _update = update
718 _update = update
714
719
715 def clean(repo, node, show_stats=True, quietempty=False):
720 def clean(repo, node, show_stats=True, quietempty=False):
716 """forcibly switch the working directory to node, clobbering changes"""
721 """forcibly switch the working directory to node, clobbering changes"""
717 stats = updaterepo(repo, node, True)
722 stats = updaterepo(repo, node, True)
718 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
723 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
719 if show_stats:
724 if show_stats:
720 _showstats(repo, stats, quietempty)
725 _showstats(repo, stats, quietempty)
721 return stats[3] > 0
726 return stats[3] > 0
722
727
723 # naming conflict in updatetotally()
728 # naming conflict in updatetotally()
724 _clean = clean
729 _clean = clean
725
730
726 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
731 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
727 """Update the working directory with extra care for non-file components
732 """Update the working directory with extra care for non-file components
728
733
729 This takes care of non-file components below:
734 This takes care of non-file components below:
730
735
731 :bookmark: might be advanced or (in)activated
736 :bookmark: might be advanced or (in)activated
732
737
733 This takes arguments below:
738 This takes arguments below:
734
739
735 :checkout: to which revision the working directory is updated
740 :checkout: to which revision the working directory is updated
736 :brev: a name, which might be a bookmark to be activated after updating
741 :brev: a name, which might be a bookmark to be activated after updating
737 :clean: whether changes in the working directory can be discarded
742 :clean: whether changes in the working directory can be discarded
738 :updatecheck: how to deal with a dirty working directory
743 :updatecheck: how to deal with a dirty working directory
739
744
740 Valid values for updatecheck are (None => linear):
745 Valid values for updatecheck are (None => linear):
741
746
742 * abort: abort if the working directory is dirty
747 * abort: abort if the working directory is dirty
743 * none: don't check (merge working directory changes into destination)
748 * none: don't check (merge working directory changes into destination)
744 * linear: check that update is linear before merging working directory
749 * linear: check that update is linear before merging working directory
745 changes into destination
750 changes into destination
746 * noconflict: check that the update does not result in file merges
751 * noconflict: check that the update does not result in file merges
747
752
748 This returns whether conflict is detected at updating or not.
753 This returns whether conflict is detected at updating or not.
749 """
754 """
750 if updatecheck is None:
755 if updatecheck is None:
751 updatecheck = ui.config('experimental', 'updatecheck')
756 updatecheck = ui.config('experimental', 'updatecheck')
752 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
757 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
753 # If not configured, or invalid value configured
758 # If not configured, or invalid value configured
754 updatecheck = 'linear'
759 updatecheck = 'linear'
755 with repo.wlock():
760 with repo.wlock():
756 movemarkfrom = None
761 movemarkfrom = None
757 warndest = False
762 warndest = False
758 if checkout is None:
763 if checkout is None:
759 updata = destutil.destupdate(repo, clean=clean)
764 updata = destutil.destupdate(repo, clean=clean)
760 checkout, movemarkfrom, brev = updata
765 checkout, movemarkfrom, brev = updata
761 warndest = True
766 warndest = True
762
767
763 if clean:
768 if clean:
764 ret = _clean(repo, checkout)
769 ret = _clean(repo, checkout)
765 else:
770 else:
766 if updatecheck == 'abort':
771 if updatecheck == 'abort':
767 cmdutil.bailifchanged(repo, merge=False)
772 cmdutil.bailifchanged(repo, merge=False)
768 updatecheck = 'none'
773 updatecheck = 'none'
769 ret = _update(repo, checkout, updatecheck=updatecheck)
774 ret = _update(repo, checkout, updatecheck=updatecheck)
770
775
771 if not ret and movemarkfrom:
776 if not ret and movemarkfrom:
772 if movemarkfrom == repo['.'].node():
777 if movemarkfrom == repo['.'].node():
773 pass # no-op update
778 pass # no-op update
774 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
779 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
775 b = ui.label(repo._activebookmark, 'bookmarks.active')
780 b = ui.label(repo._activebookmark, 'bookmarks.active')
776 ui.status(_("updating bookmark %s\n") % b)
781 ui.status(_("updating bookmark %s\n") % b)
777 else:
782 else:
778 # this can happen with a non-linear update
783 # this can happen with a non-linear update
779 b = ui.label(repo._activebookmark, 'bookmarks')
784 b = ui.label(repo._activebookmark, 'bookmarks')
780 ui.status(_("(leaving bookmark %s)\n") % b)
785 ui.status(_("(leaving bookmark %s)\n") % b)
781 bookmarks.deactivate(repo)
786 bookmarks.deactivate(repo)
782 elif brev in repo._bookmarks:
787 elif brev in repo._bookmarks:
783 if brev != repo._activebookmark:
788 if brev != repo._activebookmark:
784 b = ui.label(brev, 'bookmarks.active')
789 b = ui.label(brev, 'bookmarks.active')
785 ui.status(_("(activating bookmark %s)\n") % b)
790 ui.status(_("(activating bookmark %s)\n") % b)
786 bookmarks.activate(repo, brev)
791 bookmarks.activate(repo, brev)
787 elif brev:
792 elif brev:
788 if repo._activebookmark:
793 if repo._activebookmark:
789 b = ui.label(repo._activebookmark, 'bookmarks')
794 b = ui.label(repo._activebookmark, 'bookmarks')
790 ui.status(_("(leaving bookmark %s)\n") % b)
795 ui.status(_("(leaving bookmark %s)\n") % b)
791 bookmarks.deactivate(repo)
796 bookmarks.deactivate(repo)
792
797
793 if warndest:
798 if warndest:
794 destutil.statusotherdests(ui, repo)
799 destutil.statusotherdests(ui, repo)
795
800
796 return ret
801 return ret
797
802
798 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
803 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
799 """Branch merge with node, resolving changes. Return true if any
804 """Branch merge with node, resolving changes. Return true if any
800 unresolved conflicts."""
805 unresolved conflicts."""
801 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
806 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
802 labels=labels)
807 labels=labels)
803 _showstats(repo, stats)
808 _showstats(repo, stats)
804 if stats[3]:
809 if stats[3]:
805 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
810 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
806 "or 'hg update -C .' to abandon\n"))
811 "or 'hg update -C .' to abandon\n"))
807 elif remind:
812 elif remind:
808 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
813 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
809 return stats[3] > 0
814 return stats[3] > 0
810
815
811 def _incoming(displaychlist, subreporecurse, ui, repo, source,
816 def _incoming(displaychlist, subreporecurse, ui, repo, source,
812 opts, buffered=False):
817 opts, buffered=False):
813 """
818 """
814 Helper for incoming / gincoming.
819 Helper for incoming / gincoming.
815 displaychlist gets called with
820 displaychlist gets called with
816 (remoterepo, incomingchangesetlist, displayer) parameters,
821 (remoterepo, incomingchangesetlist, displayer) parameters,
817 and is supposed to contain only code that can't be unified.
822 and is supposed to contain only code that can't be unified.
818 """
823 """
819 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
824 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
820 other = peer(repo, opts, source)
825 other = peer(repo, opts, source)
821 ui.status(_('comparing with %s\n') % util.hidepassword(source))
826 ui.status(_('comparing with %s\n') % util.hidepassword(source))
822 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
827 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
823
828
824 if revs:
829 if revs:
825 revs = [other.lookup(rev) for rev in revs]
830 revs = [other.lookup(rev) for rev in revs]
826 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
831 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
827 revs, opts["bundle"], opts["force"])
832 revs, opts["bundle"], opts["force"])
828 try:
833 try:
829 if not chlist:
834 if not chlist:
830 ui.status(_("no changes found\n"))
835 ui.status(_("no changes found\n"))
831 return subreporecurse()
836 return subreporecurse()
832 ui.pager('incoming')
837 ui.pager('incoming')
833 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
838 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
834 displaychlist(other, chlist, displayer)
839 displaychlist(other, chlist, displayer)
835 displayer.close()
840 displayer.close()
836 finally:
841 finally:
837 cleanupfn()
842 cleanupfn()
838 subreporecurse()
843 subreporecurse()
839 return 0 # exit code is zero since we found incoming changes
844 return 0 # exit code is zero since we found incoming changes
840
845
841 def incoming(ui, repo, source, opts):
846 def incoming(ui, repo, source, opts):
842 def subreporecurse():
847 def subreporecurse():
843 ret = 1
848 ret = 1
844 if opts.get('subrepos'):
849 if opts.get('subrepos'):
845 ctx = repo[None]
850 ctx = repo[None]
846 for subpath in sorted(ctx.substate):
851 for subpath in sorted(ctx.substate):
847 sub = ctx.sub(subpath)
852 sub = ctx.sub(subpath)
848 ret = min(ret, sub.incoming(ui, source, opts))
853 ret = min(ret, sub.incoming(ui, source, opts))
849 return ret
854 return ret
850
855
851 def display(other, chlist, displayer):
856 def display(other, chlist, displayer):
852 limit = cmdutil.loglimit(opts)
857 limit = cmdutil.loglimit(opts)
853 if opts.get('newest_first'):
858 if opts.get('newest_first'):
854 chlist.reverse()
859 chlist.reverse()
855 count = 0
860 count = 0
856 for n in chlist:
861 for n in chlist:
857 if limit is not None and count >= limit:
862 if limit is not None and count >= limit:
858 break
863 break
859 parents = [p for p in other.changelog.parents(n) if p != nullid]
864 parents = [p for p in other.changelog.parents(n) if p != nullid]
860 if opts.get('no_merges') and len(parents) == 2:
865 if opts.get('no_merges') and len(parents) == 2:
861 continue
866 continue
862 count += 1
867 count += 1
863 displayer.show(other[n])
868 displayer.show(other[n])
864 return _incoming(display, subreporecurse, ui, repo, source, opts)
869 return _incoming(display, subreporecurse, ui, repo, source, opts)
865
870
866 def _outgoing(ui, repo, dest, opts):
871 def _outgoing(ui, repo, dest, opts):
867 dest = ui.expandpath(dest or 'default-push', dest or 'default')
872 dest = ui.expandpath(dest or 'default-push', dest or 'default')
868 dest, branches = parseurl(dest, opts.get('branch'))
873 dest, branches = parseurl(dest, opts.get('branch'))
869 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
874 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
870 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
875 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
871 if revs:
876 if revs:
872 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
877 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
873
878
874 other = peer(repo, opts, dest)
879 other = peer(repo, opts, dest)
875 outgoing = discovery.findcommonoutgoing(repo, other, revs,
880 outgoing = discovery.findcommonoutgoing(repo, other, revs,
876 force=opts.get('force'))
881 force=opts.get('force'))
877 o = outgoing.missing
882 o = outgoing.missing
878 if not o:
883 if not o:
879 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
884 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
880 return o, other
885 return o, other
881
886
882 def outgoing(ui, repo, dest, opts):
887 def outgoing(ui, repo, dest, opts):
883 def recurse():
888 def recurse():
884 ret = 1
889 ret = 1
885 if opts.get('subrepos'):
890 if opts.get('subrepos'):
886 ctx = repo[None]
891 ctx = repo[None]
887 for subpath in sorted(ctx.substate):
892 for subpath in sorted(ctx.substate):
888 sub = ctx.sub(subpath)
893 sub = ctx.sub(subpath)
889 ret = min(ret, sub.outgoing(ui, dest, opts))
894 ret = min(ret, sub.outgoing(ui, dest, opts))
890 return ret
895 return ret
891
896
892 limit = cmdutil.loglimit(opts)
897 limit = cmdutil.loglimit(opts)
893 o, other = _outgoing(ui, repo, dest, opts)
898 o, other = _outgoing(ui, repo, dest, opts)
894 if not o:
899 if not o:
895 cmdutil.outgoinghooks(ui, repo, other, opts, o)
900 cmdutil.outgoinghooks(ui, repo, other, opts, o)
896 return recurse()
901 return recurse()
897
902
898 if opts.get('newest_first'):
903 if opts.get('newest_first'):
899 o.reverse()
904 o.reverse()
900 ui.pager('outgoing')
905 ui.pager('outgoing')
901 displayer = cmdutil.show_changeset(ui, repo, opts)
906 displayer = cmdutil.show_changeset(ui, repo, opts)
902 count = 0
907 count = 0
903 for n in o:
908 for n in o:
904 if limit is not None and count >= limit:
909 if limit is not None and count >= limit:
905 break
910 break
906 parents = [p for p in repo.changelog.parents(n) if p != nullid]
911 parents = [p for p in repo.changelog.parents(n) if p != nullid]
907 if opts.get('no_merges') and len(parents) == 2:
912 if opts.get('no_merges') and len(parents) == 2:
908 continue
913 continue
909 count += 1
914 count += 1
910 displayer.show(repo[n])
915 displayer.show(repo[n])
911 displayer.close()
916 displayer.close()
912 cmdutil.outgoinghooks(ui, repo, other, opts, o)
917 cmdutil.outgoinghooks(ui, repo, other, opts, o)
913 recurse()
918 recurse()
914 return 0 # exit code is zero since we found outgoing changes
919 return 0 # exit code is zero since we found outgoing changes
915
920
916 def verify(repo):
921 def verify(repo):
917 """verify the consistency of a repository"""
922 """verify the consistency of a repository"""
918 ret = verifymod.verify(repo)
923 ret = verifymod.verify(repo)
919
924
920 # Broken subrepo references in hidden csets don't seem worth worrying about,
925 # Broken subrepo references in hidden csets don't seem worth worrying about,
921 # since they can't be pushed/pulled, and --hidden can be used if they are a
926 # since they can't be pushed/pulled, and --hidden can be used if they are a
922 # concern.
927 # concern.
923
928
924 # pathto() is needed for -R case
929 # pathto() is needed for -R case
925 revs = repo.revs("filelog(%s)",
930 revs = repo.revs("filelog(%s)",
926 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
931 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
927
932
928 if revs:
933 if revs:
929 repo.ui.status(_('checking subrepo links\n'))
934 repo.ui.status(_('checking subrepo links\n'))
930 for rev in revs:
935 for rev in revs:
931 ctx = repo[rev]
936 ctx = repo[rev]
932 try:
937 try:
933 for subpath in ctx.substate:
938 for subpath in ctx.substate:
934 try:
939 try:
935 ret = (ctx.sub(subpath, allowcreate=False).verify()
940 ret = (ctx.sub(subpath, allowcreate=False).verify()
936 or ret)
941 or ret)
937 except error.RepoError as e:
942 except error.RepoError as e:
938 repo.ui.warn(('%s: %s\n') % (rev, e))
943 repo.ui.warn(('%s: %s\n') % (rev, e))
939 except Exception:
944 except Exception:
940 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
945 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
941 node.short(ctx.node()))
946 node.short(ctx.node()))
942
947
943 return ret
948 return ret
944
949
945 def remoteui(src, opts):
950 def remoteui(src, opts):
946 'build a remote ui from ui or repo and opts'
951 'build a remote ui from ui or repo and opts'
947 if util.safehasattr(src, 'baseui'): # looks like a repository
952 if util.safehasattr(src, 'baseui'): # looks like a repository
948 dst = src.baseui.copy() # drop repo-specific config
953 dst = src.baseui.copy() # drop repo-specific config
949 src = src.ui # copy target options from repo
954 src = src.ui # copy target options from repo
950 else: # assume it's a global ui object
955 else: # assume it's a global ui object
951 dst = src.copy() # keep all global options
956 dst = src.copy() # keep all global options
952
957
953 # copy ssh-specific options
958 # copy ssh-specific options
954 for o in 'ssh', 'remotecmd':
959 for o in 'ssh', 'remotecmd':
955 v = opts.get(o) or src.config('ui', o)
960 v = opts.get(o) or src.config('ui', o)
956 if v:
961 if v:
957 dst.setconfig("ui", o, v, 'copied')
962 dst.setconfig("ui", o, v, 'copied')
958
963
959 # copy bundle-specific options
964 # copy bundle-specific options
960 r = src.config('bundle', 'mainreporoot')
965 r = src.config('bundle', 'mainreporoot')
961 if r:
966 if r:
962 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
967 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
963
968
964 # copy selected local settings to the remote ui
969 # copy selected local settings to the remote ui
965 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
970 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
966 for key, val in src.configitems(sect):
971 for key, val in src.configitems(sect):
967 dst.setconfig(sect, key, val, 'copied')
972 dst.setconfig(sect, key, val, 'copied')
968 v = src.config('web', 'cacerts')
973 v = src.config('web', 'cacerts')
969 if v:
974 if v:
970 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
975 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
971
976
972 return dst
977 return dst
973
978
974 # Files of interest
979 # Files of interest
975 # Used to check if the repository has changed looking at mtime and size of
980 # Used to check if the repository has changed looking at mtime and size of
976 # these files.
981 # these files.
977 foi = [('spath', '00changelog.i'),
982 foi = [('spath', '00changelog.i'),
978 ('spath', 'phaseroots'), # ! phase can change content at the same size
983 ('spath', 'phaseroots'), # ! phase can change content at the same size
979 ('spath', 'obsstore'),
984 ('spath', 'obsstore'),
980 ('path', 'bookmarks'), # ! bookmark can change content at the same size
985 ('path', 'bookmarks'), # ! bookmark can change content at the same size
981 ]
986 ]
982
987
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # Snapshot of (mtime, size) pairs for the files of interest, used
        # later to detect on-disk changes cheaply.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        currentstate, currentmtime = self._repostate()
        if currentstate == self._state:
            # Nothing relevant changed on disk; reuse the cached instance.
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = currentstate
        self.mtime = currentmtime
        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) tuple per file of interest, newest mtime)."""
        entries = []
        newest = -1
        for attr, fname in foi:
            base = getattr(self._repo, attr)
            try:
                st = os.stat(os.path.join(base, fname))
            except OSError:
                # File may not exist yet (e.g. no obsstore); fall back to
                # the containing directory so we still record something.
                st = os.stat(base)
            entries.append((st.st_mtime, st.st_size))
            newest = max(newest, st.st_mtime)

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        clone = cachedlocalrepo(repo)
        # Carry the cached freshness snapshot over so the copy does not
        # report a spurious refresh on its first fetch().
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
General Comments 0
You need to be logged in to leave comments. Login now