shared: take wlock for writing the 'shared' file...
Pierre-Yves David
r29753:e9340808 default
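The change itself is small and confined to postshare(): the destination repository's '.hg/shared' file is now written while holding that repository's wlock, so two processes sharing into the same destination can no longer interleave their writes. A condensed before/after view of the hunk below (destrepo and sharedbookmarks are the names used by the surrounding function):

    # before: 'shared' written with no lock held
    if bookmarks:
        fp = destrepo.vfs('shared', 'w')
        fp.write(sharedbookmarks + '\n')
        fp.close()

    # after: take the working-copy lock first, then write
    with destrepo.wlock():
        if bookmarks:
            fp = destrepo.vfs('shared', 'w')
            fp.write(sharedbookmarks + '\n')
            fp.close()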
@@ -1,1016 +1,1017 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 )
43 )
44
44
45 release = lock.release
45 release = lock.release
46
46
47 # shared features
47 # shared features
48 sharedbookmarks = 'bookmarks'
48 sharedbookmarks = 'bookmarks'
49
49
50 def _local(path):
50 def _local(path):
51 path = util.expandpath(util.urllocalpath(path))
51 path = util.expandpath(util.urllocalpath(path))
52 return (os.path.isfile(path) and bundlerepo or localrepo)
52 return (os.path.isfile(path) and bundlerepo or localrepo)
53
53
54 def addbranchrevs(lrepo, other, branches, revs):
54 def addbranchrevs(lrepo, other, branches, revs):
55 peer = other.peer() # a courtesy to callers using a localrepo for other
55 peer = other.peer() # a courtesy to callers using a localrepo for other
56 hashbranch, branches = branches
56 hashbranch, branches = branches
57 if not hashbranch and not branches:
57 if not hashbranch and not branches:
58 x = revs or None
58 x = revs or None
59 if util.safehasattr(revs, 'first'):
59 if util.safehasattr(revs, 'first'):
60 y = revs.first()
60 y = revs.first()
61 elif revs:
61 elif revs:
62 y = revs[0]
62 y = revs[0]
63 else:
63 else:
64 y = None
64 y = None
65 return x, y
65 return x, y
66 if revs:
66 if revs:
67 revs = list(revs)
67 revs = list(revs)
68 else:
68 else:
69 revs = []
69 revs = []
70
70
71 if not peer.capable('branchmap'):
71 if not peer.capable('branchmap'):
72 if branches:
72 if branches:
73 raise error.Abort(_("remote branch lookup not supported"))
73 raise error.Abort(_("remote branch lookup not supported"))
74 revs.append(hashbranch)
74 revs.append(hashbranch)
75 return revs, revs[0]
75 return revs, revs[0]
76 branchmap = peer.branchmap()
76 branchmap = peer.branchmap()
77
77
78 def primary(branch):
78 def primary(branch):
79 if branch == '.':
79 if branch == '.':
80 if not lrepo:
80 if not lrepo:
81 raise error.Abort(_("dirstate branch not accessible"))
81 raise error.Abort(_("dirstate branch not accessible"))
82 branch = lrepo.dirstate.branch()
82 branch = lrepo.dirstate.branch()
83 if branch in branchmap:
83 if branch in branchmap:
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
85 return True
85 return True
86 else:
86 else:
87 return False
87 return False
88
88
89 for branch in branches:
89 for branch in branches:
90 if not primary(branch):
90 if not primary(branch):
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
92 if hashbranch:
92 if hashbranch:
93 if not primary(hashbranch):
93 if not primary(hashbranch):
94 revs.append(hashbranch)
94 revs.append(hashbranch)
95 return revs, revs[0]
95 return revs, revs[0]
96
96
97 def parseurl(path, branches=None):
97 def parseurl(path, branches=None):
98 '''parse url#branch, returning (url, (branch, branches))'''
98 '''parse url#branch, returning (url, (branch, branches))'''
99
99
100 u = util.url(path)
100 u = util.url(path)
101 branch = None
101 branch = None
102 if u.fragment:
102 if u.fragment:
103 branch = u.fragment
103 branch = u.fragment
104 u.fragment = None
104 u.fragment = None
105 return str(u), (branch, branches or [])
105 return str(u), (branch, branches or [])
106
106
107 schemes = {
107 schemes = {
108 'bundle': bundlerepo,
108 'bundle': bundlerepo,
109 'union': unionrepo,
109 'union': unionrepo,
110 'file': _local,
110 'file': _local,
111 'http': httppeer,
111 'http': httppeer,
112 'https': httppeer,
112 'https': httppeer,
113 'ssh': sshpeer,
113 'ssh': sshpeer,
114 'static-http': statichttprepo,
114 'static-http': statichttprepo,
115 }
115 }
116
116
117 def _peerlookup(path):
117 def _peerlookup(path):
118 u = util.url(path)
118 u = util.url(path)
119 scheme = u.scheme or 'file'
119 scheme = u.scheme or 'file'
120 thing = schemes.get(scheme) or schemes['file']
120 thing = schemes.get(scheme) or schemes['file']
121 try:
121 try:
122 return thing(path)
122 return thing(path)
123 except TypeError:
123 except TypeError:
124 # we can't test callable(thing) because 'thing' can be an unloaded
124 # we can't test callable(thing) because 'thing' can be an unloaded
125 # module that implements __call__
125 # module that implements __call__
126 if not util.safehasattr(thing, 'instance'):
126 if not util.safehasattr(thing, 'instance'):
127 raise
127 raise
128 return thing
128 return thing
129
129
130 def islocal(repo):
130 def islocal(repo):
131 '''return true if repo (or path pointing to repo) is local'''
131 '''return true if repo (or path pointing to repo) is local'''
132 if isinstance(repo, str):
132 if isinstance(repo, str):
133 try:
133 try:
134 return _peerlookup(repo).islocal(repo)
134 return _peerlookup(repo).islocal(repo)
135 except AttributeError:
135 except AttributeError:
136 return False
136 return False
137 return repo.local()
137 return repo.local()
138
138
139 def openpath(ui, path):
139 def openpath(ui, path):
140 '''open path with open if local, url.open if remote'''
140 '''open path with open if local, url.open if remote'''
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
142 if pathurl.islocal():
142 if pathurl.islocal():
143 return util.posixfile(pathurl.localpath(), 'rb')
143 return util.posixfile(pathurl.localpath(), 'rb')
144 else:
144 else:
145 return url.open(ui, path)
145 return url.open(ui, path)
146
146
147 # a list of (ui, repo) functions called for wire peer initialization
147 # a list of (ui, repo) functions called for wire peer initialization
148 wirepeersetupfuncs = []
148 wirepeersetupfuncs = []
149
149
150 def _peerorrepo(ui, path, create=False):
150 def _peerorrepo(ui, path, create=False):
151 """return a repository object for the specified path"""
151 """return a repository object for the specified path"""
152 obj = _peerlookup(path).instance(ui, path, create)
152 obj = _peerlookup(path).instance(ui, path, create)
153 ui = getattr(obj, "ui", ui)
153 ui = getattr(obj, "ui", ui)
154 for name, module in extensions.extensions(ui):
154 for name, module in extensions.extensions(ui):
155 hook = getattr(module, 'reposetup', None)
155 hook = getattr(module, 'reposetup', None)
156 if hook:
156 if hook:
157 hook(ui, obj)
157 hook(ui, obj)
158 if not obj.local():
158 if not obj.local():
159 for f in wirepeersetupfuncs:
159 for f in wirepeersetupfuncs:
160 f(ui, obj)
160 f(ui, obj)
161 return obj
161 return obj
162
162
163 def repository(ui, path='', create=False):
163 def repository(ui, path='', create=False):
164 """return a repository object for the specified path"""
164 """return a repository object for the specified path"""
165 peer = _peerorrepo(ui, path, create)
165 peer = _peerorrepo(ui, path, create)
166 repo = peer.local()
166 repo = peer.local()
167 if not repo:
167 if not repo:
168 raise error.Abort(_("repository '%s' is not local") %
168 raise error.Abort(_("repository '%s' is not local") %
169 (path or peer.url()))
169 (path or peer.url()))
170 return repo.filtered('visible')
170 return repo.filtered('visible')
171
171
172 def peer(uiorrepo, opts, path, create=False):
172 def peer(uiorrepo, opts, path, create=False):
173 '''return a repository peer for the specified path'''
173 '''return a repository peer for the specified path'''
174 rui = remoteui(uiorrepo, opts)
174 rui = remoteui(uiorrepo, opts)
175 return _peerorrepo(rui, path, create).peer()
175 return _peerorrepo(rui, path, create).peer()
176
176
177 def defaultdest(source):
177 def defaultdest(source):
178 '''return default destination of clone if none is given
178 '''return default destination of clone if none is given
179
179
180 >>> defaultdest('foo')
180 >>> defaultdest('foo')
181 'foo'
181 'foo'
182 >>> defaultdest('/foo/bar')
182 >>> defaultdest('/foo/bar')
183 'bar'
183 'bar'
184 >>> defaultdest('/')
184 >>> defaultdest('/')
185 ''
185 ''
186 >>> defaultdest('')
186 >>> defaultdest('')
187 ''
187 ''
188 >>> defaultdest('http://example.org/')
188 >>> defaultdest('http://example.org/')
189 ''
189 ''
190 >>> defaultdest('http://example.org/foo/')
190 >>> defaultdest('http://example.org/foo/')
191 'foo'
191 'foo'
192 '''
192 '''
193 path = util.url(source).path
193 path = util.url(source).path
194 if not path:
194 if not path:
195 return ''
195 return ''
196 return os.path.basename(os.path.normpath(path))
196 return os.path.basename(os.path.normpath(path))
197
197
198 def share(ui, source, dest=None, update=True, bookmarks=True):
198 def share(ui, source, dest=None, update=True, bookmarks=True):
199 '''create a shared repository'''
199 '''create a shared repository'''
200
200
201 if not islocal(source):
201 if not islocal(source):
202 raise error.Abort(_('can only share local repositories'))
202 raise error.Abort(_('can only share local repositories'))
203
203
204 if not dest:
204 if not dest:
205 dest = defaultdest(source)
205 dest = defaultdest(source)
206 else:
206 else:
207 dest = ui.expandpath(dest)
207 dest = ui.expandpath(dest)
208
208
209 if isinstance(source, str):
209 if isinstance(source, str):
210 origsource = ui.expandpath(source)
210 origsource = ui.expandpath(source)
211 source, branches = parseurl(origsource)
211 source, branches = parseurl(origsource)
212 srcrepo = repository(ui, source)
212 srcrepo = repository(ui, source)
213 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
213 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
214 else:
214 else:
215 srcrepo = source.local()
215 srcrepo = source.local()
216 origsource = source = srcrepo.url()
216 origsource = source = srcrepo.url()
217 checkout = None
217 checkout = None
218
218
219 sharedpath = srcrepo.sharedpath # if our source is already sharing
219 sharedpath = srcrepo.sharedpath # if our source is already sharing
220
220
221 destwvfs = scmutil.vfs(dest, realpath=True)
221 destwvfs = scmutil.vfs(dest, realpath=True)
222 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
222 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
223
223
224 if destvfs.lexists():
224 if destvfs.lexists():
225 raise error.Abort(_('destination already exists'))
225 raise error.Abort(_('destination already exists'))
226
226
227 if not destwvfs.isdir():
227 if not destwvfs.isdir():
228 destwvfs.mkdir()
228 destwvfs.mkdir()
229 destvfs.makedir()
229 destvfs.makedir()
230
230
231 requirements = ''
231 requirements = ''
232 try:
232 try:
233 requirements = srcrepo.vfs.read('requires')
233 requirements = srcrepo.vfs.read('requires')
234 except IOError as inst:
234 except IOError as inst:
235 if inst.errno != errno.ENOENT:
235 if inst.errno != errno.ENOENT:
236 raise
236 raise
237
237
238 requirements += 'shared\n'
238 requirements += 'shared\n'
239 destvfs.write('requires', requirements)
239 destvfs.write('requires', requirements)
240 destvfs.write('sharedpath', sharedpath)
240 destvfs.write('sharedpath', sharedpath)
241
241
242 r = repository(ui, destwvfs.base)
242 r = repository(ui, destwvfs.base)
243 postshare(srcrepo, r, bookmarks=bookmarks)
243 postshare(srcrepo, r, bookmarks=bookmarks)
244 _postshareupdate(r, update, checkout=checkout)
244 _postshareupdate(r, update, checkout=checkout)
245
245
246 def postshare(sourcerepo, destrepo, bookmarks=True):
246 def postshare(sourcerepo, destrepo, bookmarks=True):
247 """Called after a new shared repo is created.
247 """Called after a new shared repo is created.
248
248
249 The new repo only has a requirements file and pointer to the source.
249 The new repo only has a requirements file and pointer to the source.
250 This function configures additional shared data.
250 This function configures additional shared data.
251
251
252 Extensions can wrap this function and write additional entries to
252 Extensions can wrap this function and write additional entries to
253 destrepo/.hg/shared to indicate additional pieces of data to be shared.
253 destrepo/.hg/shared to indicate additional pieces of data to be shared.
254 """
254 """
255 default = sourcerepo.ui.config('paths', 'default')
255 default = sourcerepo.ui.config('paths', 'default')
256 if default:
256 if default:
257 fp = destrepo.vfs("hgrc", "w", text=True)
257 fp = destrepo.vfs("hgrc", "w", text=True)
258 fp.write("[paths]\n")
258 fp.write("[paths]\n")
259 fp.write("default = %s\n" % default)
259 fp.write("default = %s\n" % default)
260 fp.close()
260 fp.close()
261
261
262 if bookmarks:
262 with destrepo.wlock():
263 fp = destrepo.vfs('shared', 'w')
263 if bookmarks:
264 fp.write(sharedbookmarks + '\n')
264 fp = destrepo.vfs('shared', 'w')
265 fp.close()
265 fp.write(sharedbookmarks + '\n')
266 fp.close()
266
267
267 def _postshareupdate(repo, update, checkout=None):
268 def _postshareupdate(repo, update, checkout=None):
268 """Maybe perform a working directory update after a shared repo is created.
269 """Maybe perform a working directory update after a shared repo is created.
269
270
270 ``update`` can be a boolean or a revision to update to.
271 ``update`` can be a boolean or a revision to update to.
271 """
272 """
272 if not update:
273 if not update:
273 return
274 return
274
275
275 repo.ui.status(_("updating working directory\n"))
276 repo.ui.status(_("updating working directory\n"))
276 if update is not True:
277 if update is not True:
277 checkout = update
278 checkout = update
278 for test in (checkout, 'default', 'tip'):
279 for test in (checkout, 'default', 'tip'):
279 if test is None:
280 if test is None:
280 continue
281 continue
281 try:
282 try:
282 uprev = repo.lookup(test)
283 uprev = repo.lookup(test)
283 break
284 break
284 except error.RepoLookupError:
285 except error.RepoLookupError:
285 continue
286 continue
286 _update(repo, uprev)
287 _update(repo, uprev)
287
288
288 def copystore(ui, srcrepo, destpath):
289 def copystore(ui, srcrepo, destpath):
289 '''copy files from store of srcrepo in destpath
290 '''copy files from store of srcrepo in destpath
290
291
291 returns destlock
292 returns destlock
292 '''
293 '''
293 destlock = None
294 destlock = None
294 try:
295 try:
295 hardlink = None
296 hardlink = None
296 num = 0
297 num = 0
297 closetopic = [None]
298 closetopic = [None]
298 def prog(topic, pos):
299 def prog(topic, pos):
299 if pos is None:
300 if pos is None:
300 closetopic[0] = topic
301 closetopic[0] = topic
301 else:
302 else:
302 ui.progress(topic, pos + num)
303 ui.progress(topic, pos + num)
303 srcpublishing = srcrepo.publishing()
304 srcpublishing = srcrepo.publishing()
304 srcvfs = scmutil.vfs(srcrepo.sharedpath)
305 srcvfs = scmutil.vfs(srcrepo.sharedpath)
305 dstvfs = scmutil.vfs(destpath)
306 dstvfs = scmutil.vfs(destpath)
306 for f in srcrepo.store.copylist():
307 for f in srcrepo.store.copylist():
307 if srcpublishing and f.endswith('phaseroots'):
308 if srcpublishing and f.endswith('phaseroots'):
308 continue
309 continue
309 dstbase = os.path.dirname(f)
310 dstbase = os.path.dirname(f)
310 if dstbase and not dstvfs.exists(dstbase):
311 if dstbase and not dstvfs.exists(dstbase):
311 dstvfs.mkdir(dstbase)
312 dstvfs.mkdir(dstbase)
312 if srcvfs.exists(f):
313 if srcvfs.exists(f):
313 if f.endswith('data'):
314 if f.endswith('data'):
314 # 'dstbase' may be empty (e.g. revlog format 0)
315 # 'dstbase' may be empty (e.g. revlog format 0)
315 lockfile = os.path.join(dstbase, "lock")
316 lockfile = os.path.join(dstbase, "lock")
316 # lock to avoid premature writing to the target
317 # lock to avoid premature writing to the target
317 destlock = lock.lock(dstvfs, lockfile)
318 destlock = lock.lock(dstvfs, lockfile)
318 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
319 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
319 hardlink, progress=prog)
320 hardlink, progress=prog)
320 num += n
321 num += n
321 if hardlink:
322 if hardlink:
322 ui.debug("linked %d files\n" % num)
323 ui.debug("linked %d files\n" % num)
323 if closetopic[0]:
324 if closetopic[0]:
324 ui.progress(closetopic[0], None)
325 ui.progress(closetopic[0], None)
325 else:
326 else:
326 ui.debug("copied %d files\n" % num)
327 ui.debug("copied %d files\n" % num)
327 if closetopic[0]:
328 if closetopic[0]:
328 ui.progress(closetopic[0], None)
329 ui.progress(closetopic[0], None)
329 return destlock
330 return destlock
330 except: # re-raises
331 except: # re-raises
331 release(destlock)
332 release(destlock)
332 raise
333 raise
333
334
334 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
335 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
335 rev=None, update=True, stream=False):
336 rev=None, update=True, stream=False):
336 """Perform a clone using a shared repo.
337 """Perform a clone using a shared repo.
337
338
338 The store for the repository will be located at <sharepath>/.hg. The
339 The store for the repository will be located at <sharepath>/.hg. The
339 specified revisions will be cloned or pulled from "source". A shared repo
340 specified revisions will be cloned or pulled from "source". A shared repo
340 will be created at "dest" and a working copy will be created if "update" is
341 will be created at "dest" and a working copy will be created if "update" is
341 True.
342 True.
342 """
343 """
343 revs = None
344 revs = None
344 if rev:
345 if rev:
345 if not srcpeer.capable('lookup'):
346 if not srcpeer.capable('lookup'):
346 raise error.Abort(_("src repository does not support "
347 raise error.Abort(_("src repository does not support "
347 "revision lookup and so doesn't "
348 "revision lookup and so doesn't "
348 "support clone by revision"))
349 "support clone by revision"))
349 revs = [srcpeer.lookup(r) for r in rev]
350 revs = [srcpeer.lookup(r) for r in rev]
350
351
351 # Obtain a lock before checking for or cloning the pooled repo otherwise
352 # Obtain a lock before checking for or cloning the pooled repo otherwise
352 # 2 clients may race creating or populating it.
353 # 2 clients may race creating or populating it.
353 pooldir = os.path.dirname(sharepath)
354 pooldir = os.path.dirname(sharepath)
354 # lock class requires the directory to exist.
355 # lock class requires the directory to exist.
355 try:
356 try:
356 util.makedir(pooldir, False)
357 util.makedir(pooldir, False)
357 except OSError as e:
358 except OSError as e:
358 if e.errno != errno.EEXIST:
359 if e.errno != errno.EEXIST:
359 raise
360 raise
360
361
361 poolvfs = scmutil.vfs(pooldir)
362 poolvfs = scmutil.vfs(pooldir)
362 basename = os.path.basename(sharepath)
363 basename = os.path.basename(sharepath)
363
364
364 with lock.lock(poolvfs, '%s.lock' % basename):
365 with lock.lock(poolvfs, '%s.lock' % basename):
365 if os.path.exists(sharepath):
366 if os.path.exists(sharepath):
366 ui.status(_('(sharing from existing pooled repository %s)\n') %
367 ui.status(_('(sharing from existing pooled repository %s)\n') %
367 basename)
368 basename)
368 else:
369 else:
369 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
370 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
370 # Always use pull mode because hardlinks in share mode don't work
371 # Always use pull mode because hardlinks in share mode don't work
371 # well. Never update because working copies aren't necessary in
372 # well. Never update because working copies aren't necessary in
372 # share mode.
373 # share mode.
373 clone(ui, peeropts, source, dest=sharepath, pull=True,
374 clone(ui, peeropts, source, dest=sharepath, pull=True,
374 rev=rev, update=False, stream=stream)
375 rev=rev, update=False, stream=stream)
375
376
376 sharerepo = repository(ui, path=sharepath)
377 sharerepo = repository(ui, path=sharepath)
377 share(ui, sharerepo, dest=dest, update=False, bookmarks=False)
378 share(ui, sharerepo, dest=dest, update=False, bookmarks=False)
378
379
379 # We need to perform a pull against the dest repo to fetch bookmarks
380 # We need to perform a pull against the dest repo to fetch bookmarks
380 # and other non-store data that isn't shared by default. In the case of
381 # and other non-store data that isn't shared by default. In the case of
381 # non-existing shared repo, this means we pull from the remote twice. This
382 # non-existing shared repo, this means we pull from the remote twice. This
382 # is a bit weird. But at the time it was implemented, there wasn't an easy
383 # is a bit weird. But at the time it was implemented, there wasn't an easy
383 # way to pull just non-changegroup data.
384 # way to pull just non-changegroup data.
384 destrepo = repository(ui, path=dest)
385 destrepo = repository(ui, path=dest)
385 exchange.pull(destrepo, srcpeer, heads=revs)
386 exchange.pull(destrepo, srcpeer, heads=revs)
386
387
387 _postshareupdate(destrepo, update)
388 _postshareupdate(destrepo, update)
388
389
389 return srcpeer, peer(ui, peeropts, dest)
390 return srcpeer, peer(ui, peeropts, dest)
390
391
391 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
392 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
392 update=True, stream=False, branch=None, shareopts=None):
393 update=True, stream=False, branch=None, shareopts=None):
393 """Make a copy of an existing repository.
394 """Make a copy of an existing repository.
394
395
395 Create a copy of an existing repository in a new directory. The
396 Create a copy of an existing repository in a new directory. The
396 source and destination are URLs, as passed to the repository
397 source and destination are URLs, as passed to the repository
397 function. Returns a pair of repository peers, the source and
398 function. Returns a pair of repository peers, the source and
398 newly created destination.
399 newly created destination.
399
400
400 The location of the source is added to the new repository's
401 The location of the source is added to the new repository's
401 .hg/hgrc file, as the default to be used for future pulls and
402 .hg/hgrc file, as the default to be used for future pulls and
402 pushes.
403 pushes.
403
404
404 If an exception is raised, the partly cloned/updated destination
405 If an exception is raised, the partly cloned/updated destination
405 repository will be deleted.
406 repository will be deleted.
406
407
407 Arguments:
408 Arguments:
408
409
409 source: repository object or URL
410 source: repository object or URL
410
411
411 dest: URL of destination repository to create (defaults to base
412 dest: URL of destination repository to create (defaults to base
412 name of source repository)
413 name of source repository)
413
414
414 pull: always pull from source repository, even in local case or if the
415 pull: always pull from source repository, even in local case or if the
415 server prefers streaming
416 server prefers streaming
416
417
417 stream: stream raw data uncompressed from repository (fast over
418 stream: stream raw data uncompressed from repository (fast over
418 LAN, slow over WAN)
419 LAN, slow over WAN)
419
420
420 rev: revision to clone up to (implies pull=True)
421 rev: revision to clone up to (implies pull=True)
421
422
422 update: update working directory after clone completes, if
423 update: update working directory after clone completes, if
423 destination is local repository (True means update to default rev,
424 destination is local repository (True means update to default rev,
424 anything else is treated as a revision)
425 anything else is treated as a revision)
425
426
426 branch: branches to clone
427 branch: branches to clone
427
428
428 shareopts: dict of options to control auto sharing behavior. The "pool" key
429 shareopts: dict of options to control auto sharing behavior. The "pool" key
429 activates auto sharing mode and defines the directory for stores. The
430 activates auto sharing mode and defines the directory for stores. The
430 "mode" key determines how to construct the directory name of the shared
431 "mode" key determines how to construct the directory name of the shared
431 repository. "identity" means the name is derived from the node of the first
432 repository. "identity" means the name is derived from the node of the first
432 changeset in the repository. "remote" means the name is derived from the
433 changeset in the repository. "remote" means the name is derived from the
433 remote's path/URL. Defaults to "identity."
434 remote's path/URL. Defaults to "identity."
434 """
435 """
435
436
436 if isinstance(source, str):
437 if isinstance(source, str):
437 origsource = ui.expandpath(source)
438 origsource = ui.expandpath(source)
438 source, branch = parseurl(origsource, branch)
439 source, branch = parseurl(origsource, branch)
439 srcpeer = peer(ui, peeropts, source)
440 srcpeer = peer(ui, peeropts, source)
440 else:
441 else:
441 srcpeer = source.peer() # in case we were called with a localrepo
442 srcpeer = source.peer() # in case we were called with a localrepo
442 branch = (None, branch or [])
443 branch = (None, branch or [])
443 origsource = source = srcpeer.url()
444 origsource = source = srcpeer.url()
444 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
445 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
445
446
446 if dest is None:
447 if dest is None:
447 dest = defaultdest(source)
448 dest = defaultdest(source)
448 if dest:
449 if dest:
449 ui.status(_("destination directory: %s\n") % dest)
450 ui.status(_("destination directory: %s\n") % dest)
450 else:
451 else:
451 dest = ui.expandpath(dest)
452 dest = ui.expandpath(dest)
452
453
453 dest = util.urllocalpath(dest)
454 dest = util.urllocalpath(dest)
454 source = util.urllocalpath(source)
455 source = util.urllocalpath(source)
455
456
456 if not dest:
457 if not dest:
457 raise error.Abort(_("empty destination path is not valid"))
458 raise error.Abort(_("empty destination path is not valid"))
458
459
459 destvfs = scmutil.vfs(dest, expandpath=True)
460 destvfs = scmutil.vfs(dest, expandpath=True)
460 if destvfs.lexists():
461 if destvfs.lexists():
461 if not destvfs.isdir():
462 if not destvfs.isdir():
462 raise error.Abort(_("destination '%s' already exists") % dest)
463 raise error.Abort(_("destination '%s' already exists") % dest)
463 elif destvfs.listdir():
464 elif destvfs.listdir():
464 raise error.Abort(_("destination '%s' is not empty") % dest)
465 raise error.Abort(_("destination '%s' is not empty") % dest)
465
466
466 shareopts = shareopts or {}
467 shareopts = shareopts or {}
467 sharepool = shareopts.get('pool')
468 sharepool = shareopts.get('pool')
468 sharenamemode = shareopts.get('mode')
469 sharenamemode = shareopts.get('mode')
469 if sharepool and islocal(dest):
470 if sharepool and islocal(dest):
470 sharepath = None
471 sharepath = None
471 if sharenamemode == 'identity':
472 if sharenamemode == 'identity':
472 # Resolve the name from the initial changeset in the remote
473 # Resolve the name from the initial changeset in the remote
473 # repository. This returns nullid when the remote is empty. It
474 # repository. This returns nullid when the remote is empty. It
474 # raises RepoLookupError if revision 0 is filtered or otherwise
475 # raises RepoLookupError if revision 0 is filtered or otherwise
475 # not available. If we fail to resolve, sharing is not enabled.
476 # not available. If we fail to resolve, sharing is not enabled.
476 try:
477 try:
477 rootnode = srcpeer.lookup('0')
478 rootnode = srcpeer.lookup('0')
478 if rootnode != node.nullid:
479 if rootnode != node.nullid:
479 sharepath = os.path.join(sharepool, node.hex(rootnode))
480 sharepath = os.path.join(sharepool, node.hex(rootnode))
480 else:
481 else:
481 ui.status(_('(not using pooled storage: '
482 ui.status(_('(not using pooled storage: '
482 'remote appears to be empty)\n'))
483 'remote appears to be empty)\n'))
483 except error.RepoLookupError:
484 except error.RepoLookupError:
484 ui.status(_('(not using pooled storage: '
485 ui.status(_('(not using pooled storage: '
485 'unable to resolve identity of remote)\n'))
486 'unable to resolve identity of remote)\n'))
486 elif sharenamemode == 'remote':
487 elif sharenamemode == 'remote':
487 sharepath = os.path.join(
488 sharepath = os.path.join(
488 sharepool, hashlib.sha1(source).hexdigest())
489 sharepool, hashlib.sha1(source).hexdigest())
489 else:
490 else:
490 raise error.Abort(_('unknown share naming mode: %s') %
491 raise error.Abort(_('unknown share naming mode: %s') %
491 sharenamemode)
492 sharenamemode)
492
493
493 if sharepath:
494 if sharepath:
494 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
495 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
495 dest, pull=pull, rev=rev, update=update,
496 dest, pull=pull, rev=rev, update=update,
496 stream=stream)
497 stream=stream)
497
498
498 srclock = destlock = cleandir = None
499 srclock = destlock = cleandir = None
499 srcrepo = srcpeer.local()
500 srcrepo = srcpeer.local()
500 try:
501 try:
501 abspath = origsource
502 abspath = origsource
502 if islocal(origsource):
503 if islocal(origsource):
503 abspath = os.path.abspath(util.urllocalpath(origsource))
504 abspath = os.path.abspath(util.urllocalpath(origsource))
504
505
505 if islocal(dest):
506 if islocal(dest):
506 cleandir = dest
507 cleandir = dest
507
508
508 copy = False
509 copy = False
509 if (srcrepo and srcrepo.cancopy() and islocal(dest)
510 if (srcrepo and srcrepo.cancopy() and islocal(dest)
510 and not phases.hassecret(srcrepo)):
511 and not phases.hassecret(srcrepo)):
511 copy = not pull and not rev
512 copy = not pull and not rev
512
513
513 if copy:
514 if copy:
514 try:
515 try:
515 # we use a lock here because if we race with commit, we
516 # we use a lock here because if we race with commit, we
516 # can end up with extra data in the cloned revlogs that's
517 # can end up with extra data in the cloned revlogs that's
517 # not pointed to by changesets, thus causing verify to
518 # not pointed to by changesets, thus causing verify to
518 # fail
519 # fail
519 srclock = srcrepo.lock(wait=False)
520 srclock = srcrepo.lock(wait=False)
520 except error.LockError:
521 except error.LockError:
521 copy = False
522 copy = False
522
523
523 if copy:
524 if copy:
524 srcrepo.hook('preoutgoing', throw=True, source='clone')
525 srcrepo.hook('preoutgoing', throw=True, source='clone')
525 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
526 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
526 if not os.path.exists(dest):
527 if not os.path.exists(dest):
527 os.mkdir(dest)
528 os.mkdir(dest)
528 else:
529 else:
529 # only clean up directories we create ourselves
530 # only clean up directories we create ourselves
530 cleandir = hgdir
531 cleandir = hgdir
531 try:
532 try:
532 destpath = hgdir
533 destpath = hgdir
533 util.makedir(destpath, notindexed=True)
534 util.makedir(destpath, notindexed=True)
534 except OSError as inst:
535 except OSError as inst:
535 if inst.errno == errno.EEXIST:
536 if inst.errno == errno.EEXIST:
536 cleandir = None
537 cleandir = None
537 raise error.Abort(_("destination '%s' already exists")
538 raise error.Abort(_("destination '%s' already exists")
538 % dest)
539 % dest)
539 raise
540 raise
540
541
541 destlock = copystore(ui, srcrepo, destpath)
542 destlock = copystore(ui, srcrepo, destpath)
542 # copy bookmarks over
543 # copy bookmarks over
543 srcbookmarks = srcrepo.join('bookmarks')
544 srcbookmarks = srcrepo.join('bookmarks')
544 dstbookmarks = os.path.join(destpath, 'bookmarks')
545 dstbookmarks = os.path.join(destpath, 'bookmarks')
545 if os.path.exists(srcbookmarks):
546 if os.path.exists(srcbookmarks):
546 util.copyfile(srcbookmarks, dstbookmarks)
547 util.copyfile(srcbookmarks, dstbookmarks)
547
548
548 # Recomputing branch cache might be slow on big repos,
549 # Recomputing branch cache might be slow on big repos,
549 # so just copy it
550 # so just copy it
550 def copybranchcache(fname):
551 def copybranchcache(fname):
551 srcbranchcache = srcrepo.join('cache/%s' % fname)
552 srcbranchcache = srcrepo.join('cache/%s' % fname)
552 dstbranchcache = os.path.join(dstcachedir, fname)
553 dstbranchcache = os.path.join(dstcachedir, fname)
553 if os.path.exists(srcbranchcache):
554 if os.path.exists(srcbranchcache):
554 if not os.path.exists(dstcachedir):
555 if not os.path.exists(dstcachedir):
555 os.mkdir(dstcachedir)
556 os.mkdir(dstcachedir)
556 util.copyfile(srcbranchcache, dstbranchcache)
557 util.copyfile(srcbranchcache, dstbranchcache)
557
558
558 dstcachedir = os.path.join(destpath, 'cache')
559 dstcachedir = os.path.join(destpath, 'cache')
559 # In local clones we're copying all nodes, not just served
560 # In local clones we're copying all nodes, not just served
560 # ones. Therefore copy all branch caches over.
561 # ones. Therefore copy all branch caches over.
561 copybranchcache('branch2')
562 copybranchcache('branch2')
562 for cachename in repoview.filtertable:
563 for cachename in repoview.filtertable:
563 copybranchcache('branch2-%s' % cachename)
564 copybranchcache('branch2-%s' % cachename)
564
565
565 # we need to re-init the repo after manually copying the data
566 # we need to re-init the repo after manually copying the data
566 # into it
567 # into it
567 destpeer = peer(srcrepo, peeropts, dest)
568 destpeer = peer(srcrepo, peeropts, dest)
568 srcrepo.hook('outgoing', source='clone',
569 srcrepo.hook('outgoing', source='clone',
569 node=node.hex(node.nullid))
570 node=node.hex(node.nullid))
570 else:
571 else:
571 try:
572 try:
572 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
573 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
573 # only pass ui when no srcrepo
574 # only pass ui when no srcrepo
574 except OSError as inst:
575 except OSError as inst:
575 if inst.errno == errno.EEXIST:
576 if inst.errno == errno.EEXIST:
576 cleandir = None
577 cleandir = None
577 raise error.Abort(_("destination '%s' already exists")
578 raise error.Abort(_("destination '%s' already exists")
578 % dest)
579 % dest)
579 raise
580 raise
580
581
581 revs = None
582 revs = None
582 if rev:
583 if rev:
583 if not srcpeer.capable('lookup'):
584 if not srcpeer.capable('lookup'):
584 raise error.Abort(_("src repository does not support "
585 raise error.Abort(_("src repository does not support "
585 "revision lookup and so doesn't "
586 "revision lookup and so doesn't "
586 "support clone by revision"))
587 "support clone by revision"))
587 revs = [srcpeer.lookup(r) for r in rev]
588 revs = [srcpeer.lookup(r) for r in rev]
588 checkout = revs[0]
589 checkout = revs[0]
589 local = destpeer.local()
590 local = destpeer.local()
590 if local:
591 if local:
591 if not stream:
592 if not stream:
592 if pull:
593 if pull:
593 stream = False
594 stream = False
594 else:
595 else:
595 stream = None
596 stream = None
596 # internal config: ui.quietbookmarkmove
597 # internal config: ui.quietbookmarkmove
597 quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
598 quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
598 try:
599 try:
599 local.ui.setconfig(
600 local.ui.setconfig(
600 'ui', 'quietbookmarkmove', True, 'clone')
601 'ui', 'quietbookmarkmove', True, 'clone')
601 exchange.pull(local, srcpeer, revs,
602 exchange.pull(local, srcpeer, revs,
602 streamclonerequested=stream)
603 streamclonerequested=stream)
603 finally:
604 finally:
604 local.ui.restoreconfig(quiet)
605 local.ui.restoreconfig(quiet)
605 elif srcrepo:
606 elif srcrepo:
606 exchange.push(srcrepo, destpeer, revs=revs,
607 exchange.push(srcrepo, destpeer, revs=revs,
607 bookmarks=srcrepo._bookmarks.keys())
608 bookmarks=srcrepo._bookmarks.keys())
608 else:
609 else:
609 raise error.Abort(_("clone from remote to remote not supported")
610 raise error.Abort(_("clone from remote to remote not supported")
610 )
611 )
611
612
612 cleandir = None
613 cleandir = None
613
614
614 destrepo = destpeer.local()
615 destrepo = destpeer.local()
615 if destrepo:
616 if destrepo:
616 template = uimod.samplehgrcs['cloned']
617 template = uimod.samplehgrcs['cloned']
617 fp = destrepo.vfs("hgrc", "w", text=True)
618 fp = destrepo.vfs("hgrc", "w", text=True)
618 u = util.url(abspath)
619 u = util.url(abspath)
619 u.passwd = None
620 u.passwd = None
620 defaulturl = str(u)
621 defaulturl = str(u)
621 fp.write(template % defaulturl)
622 fp.write(template % defaulturl)
622 fp.close()
623 fp.close()
623
624
624 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
625 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
625
626
626 if update:
627 if update:
627 if update is not True:
628 if update is not True:
628 checkout = srcpeer.lookup(update)
629 checkout = srcpeer.lookup(update)
629 uprev = None
630 uprev = None
630 status = None
631 status = None
631 if checkout is not None:
632 if checkout is not None:
632 try:
633 try:
633 uprev = destrepo.lookup(checkout)
634 uprev = destrepo.lookup(checkout)
634 except error.RepoLookupError:
635 except error.RepoLookupError:
635 if update is not True:
636 if update is not True:
636 try:
637 try:
637 uprev = destrepo.lookup(update)
638 uprev = destrepo.lookup(update)
638 except error.RepoLookupError:
639 except error.RepoLookupError:
639 pass
640 pass
640 if uprev is None:
641 if uprev is None:
641 try:
642 try:
642 uprev = destrepo._bookmarks['@']
643 uprev = destrepo._bookmarks['@']
643 update = '@'
644 update = '@'
644 bn = destrepo[uprev].branch()
645 bn = destrepo[uprev].branch()
645 if bn == 'default':
646 if bn == 'default':
646 status = _("updating to bookmark @\n")
647 status = _("updating to bookmark @\n")
647 else:
648 else:
648 status = (_("updating to bookmark @ on branch %s\n")
649 status = (_("updating to bookmark @ on branch %s\n")
649 % bn)
650 % bn)
650 except KeyError:
651 except KeyError:
651 try:
652 try:
652 uprev = destrepo.branchtip('default')
653 uprev = destrepo.branchtip('default')
653 except error.RepoLookupError:
654 except error.RepoLookupError:
654 uprev = destrepo.lookup('tip')
655 uprev = destrepo.lookup('tip')
655 if not status:
656 if not status:
656 bn = destrepo[uprev].branch()
657 bn = destrepo[uprev].branch()
657 status = _("updating to branch %s\n") % bn
658 status = _("updating to branch %s\n") % bn
658 destrepo.ui.status(status)
659 destrepo.ui.status(status)
659 _update(destrepo, uprev)
660 _update(destrepo, uprev)
660 if update in destrepo._bookmarks:
661 if update in destrepo._bookmarks:
661 bookmarks.activate(destrepo, update)
662 bookmarks.activate(destrepo, update)
662 finally:
663 finally:
663 release(srclock, destlock)
664 release(srclock, destlock)
664 if cleandir is not None:
665 if cleandir is not None:
665 shutil.rmtree(cleandir, True)
666 shutil.rmtree(cleandir, True)
666 if srcpeer is not None:
667 if srcpeer is not None:
667 srcpeer.close()
668 srcpeer.close()
668 return srcpeer, destpeer
669 return srcpeer, destpeer
669
670
670 def _showstats(repo, stats, quietempty=False):
671 def _showstats(repo, stats, quietempty=False):
671 if quietempty and not any(stats):
672 if quietempty and not any(stats):
672 return
673 return
673 repo.ui.status(_("%d files updated, %d files merged, "
674 repo.ui.status(_("%d files updated, %d files merged, "
674 "%d files removed, %d files unresolved\n") % stats)
675 "%d files removed, %d files unresolved\n") % stats)
675
676
676 def updaterepo(repo, node, overwrite):
677 def updaterepo(repo, node, overwrite):
677 """Update the working directory to node.
678 """Update the working directory to node.
678
679
679 When overwrite is set, changes are clobbered, merged else
680 When overwrite is set, changes are clobbered, merged else
680
681
681 returns stats (see pydoc mercurial.merge.applyupdates)"""
682 returns stats (see pydoc mercurial.merge.applyupdates)"""
682 return mergemod.update(repo, node, False, overwrite,
683 return mergemod.update(repo, node, False, overwrite,
683 labels=['working copy', 'destination'])
684 labels=['working copy', 'destination'])
684
685
685 def update(repo, node, quietempty=False):
686 def update(repo, node, quietempty=False):
686 """update the working directory to node, merging linear changes"""
687 """update the working directory to node, merging linear changes"""
687 stats = updaterepo(repo, node, False)
688 stats = updaterepo(repo, node, False)
688 _showstats(repo, stats, quietempty)
689 _showstats(repo, stats, quietempty)
689 if stats[3]:
690 if stats[3]:
690 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
691 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
691 return stats[3] > 0
692 return stats[3] > 0
692
693
693 # naming conflict in clone()
694 # naming conflict in clone()
694 _update = update
695 _update = update
695
696
696 def clean(repo, node, show_stats=True, quietempty=False):
697 def clean(repo, node, show_stats=True, quietempty=False):
697 """forcibly switch the working directory to node, clobbering changes"""
698 """forcibly switch the working directory to node, clobbering changes"""
698 stats = updaterepo(repo, node, True)
699 stats = updaterepo(repo, node, True)
699 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
700 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
700 if show_stats:
701 if show_stats:
701 _showstats(repo, stats, quietempty)
702 _showstats(repo, stats, quietempty)
702 return stats[3] > 0
703 return stats[3] > 0
703
704
704 # naming conflict in updatetotally()
705 # naming conflict in updatetotally()
705 _clean = clean
706 _clean = clean
706
707
707 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
708 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
708 """Update the working directory with extra care for non-file components
709 """Update the working directory with extra care for non-file components
709
710
710 This takes care of non-file components below:
711 This takes care of non-file components below:
711
712
712 :bookmark: might be advanced or (in)activated
713 :bookmark: might be advanced or (in)activated
713
714
714 This takes arguments below:
715 This takes arguments below:
715
716
716 :checkout: to which revision the working directory is updated
717 :checkout: to which revision the working directory is updated
717 :brev: a name, which might be a bookmark to be activated after updating
718 :brev: a name, which might be a bookmark to be activated after updating
718 :clean: whether changes in the working directory can be discarded
719 :clean: whether changes in the working directory can be discarded
719 :check: whether changes in the working directory should be checked
720 :check: whether changes in the working directory should be checked
720
721
721 This returns whether conflict is detected at updating or not.
722 This returns whether conflict is detected at updating or not.
722 """
723 """
723 with repo.wlock():
724 with repo.wlock():
724 movemarkfrom = None
725 movemarkfrom = None
725 warndest = False
726 warndest = False
726 if checkout is None:
727 if checkout is None:
727 updata = destutil.destupdate(repo, clean=clean, check=check)
728 updata = destutil.destupdate(repo, clean=clean, check=check)
728 checkout, movemarkfrom, brev = updata
729 checkout, movemarkfrom, brev = updata
729 warndest = True
730 warndest = True
730
731
731 if clean:
732 if clean:
732 ret = _clean(repo, checkout)
733 ret = _clean(repo, checkout)
733 else:
734 else:
734 ret = _update(repo, checkout)
735 ret = _update(repo, checkout)
735
736
736 if not ret and movemarkfrom:
737 if not ret and movemarkfrom:
737 if movemarkfrom == repo['.'].node():
738 if movemarkfrom == repo['.'].node():
738 pass # no-op update
739 pass # no-op update
739 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
740 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
740 ui.status(_("updating bookmark %s\n") % repo._activebookmark)
741 ui.status(_("updating bookmark %s\n") % repo._activebookmark)
741 else:
742 else:
742 # this can happen with a non-linear update
743 # this can happen with a non-linear update
743 ui.status(_("(leaving bookmark %s)\n") %
744 ui.status(_("(leaving bookmark %s)\n") %
744 repo._activebookmark)
745 repo._activebookmark)
745 bookmarks.deactivate(repo)
746 bookmarks.deactivate(repo)
746 elif brev in repo._bookmarks:
747 elif brev in repo._bookmarks:
747 if brev != repo._activebookmark:
748 if brev != repo._activebookmark:
748 ui.status(_("(activating bookmark %s)\n") % brev)
749 ui.status(_("(activating bookmark %s)\n") % brev)
749 bookmarks.activate(repo, brev)
750 bookmarks.activate(repo, brev)
750 elif brev:
751 elif brev:
751 if repo._activebookmark:
752 if repo._activebookmark:
752 ui.status(_("(leaving bookmark %s)\n") %
753 ui.status(_("(leaving bookmark %s)\n") %
753 repo._activebookmark)
754 repo._activebookmark)
754 bookmarks.deactivate(repo)
755 bookmarks.deactivate(repo)
755
756
756 if warndest:
757 if warndest:
757 destutil.statusotherdests(ui, repo)
758 destutil.statusotherdests(ui, repo)
758
759
759 return ret
760 return ret
760
761
761 def merge(repo, node, force=None, remind=True, mergeforce=False):
762 def merge(repo, node, force=None, remind=True, mergeforce=False):
762 """Branch merge with node, resolving changes. Return true if any
763 """Branch merge with node, resolving changes. Return true if any
763 unresolved conflicts."""
764 unresolved conflicts."""
764 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
765 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
765 _showstats(repo, stats)
766 _showstats(repo, stats)
766 if stats[3]:
767 if stats[3]:
767 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
768 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
768 "or 'hg update -C .' to abandon\n"))
769 "or 'hg update -C .' to abandon\n"))
769 elif remind:
770 elif remind:
770 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
771 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
771 return stats[3] > 0
772 return stats[3] > 0
772
773
773 def _incoming(displaychlist, subreporecurse, ui, repo, source,
774 def _incoming(displaychlist, subreporecurse, ui, repo, source,
774 opts, buffered=False):
775 opts, buffered=False):
775 """
776 """
776 Helper for incoming / gincoming.
777 Helper for incoming / gincoming.
777 displaychlist gets called with
778 displaychlist gets called with
778 (remoterepo, incomingchangesetlist, displayer) parameters,
779 (remoterepo, incomingchangesetlist, displayer) parameters,
779 and is supposed to contain only code that can't be unified.
780 and is supposed to contain only code that can't be unified.
780 """
781 """
781 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
782 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
782 other = peer(repo, opts, source)
783 other = peer(repo, opts, source)
783 ui.status(_('comparing with %s\n') % util.hidepassword(source))
784 ui.status(_('comparing with %s\n') % util.hidepassword(source))
784 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
785 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
785
786
786 if revs:
787 if revs:
787 revs = [other.lookup(rev) for rev in revs]
788 revs = [other.lookup(rev) for rev in revs]
788 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
789 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
789 revs, opts["bundle"], opts["force"])
790 revs, opts["bundle"], opts["force"])
790 try:
791 try:
791 if not chlist:
792 if not chlist:
792 ui.status(_("no changes found\n"))
793 ui.status(_("no changes found\n"))
793 return subreporecurse()
794 return subreporecurse()
794
795
795 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
796 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
796 displaychlist(other, chlist, displayer)
797 displaychlist(other, chlist, displayer)
797 displayer.close()
798 displayer.close()
798 finally:
799 finally:
799 cleanupfn()
800 cleanupfn()
800 subreporecurse()
801 subreporecurse()
801 return 0 # exit code is zero since we found incoming changes
802 return 0 # exit code is zero since we found incoming changes
802
803
803 def incoming(ui, repo, source, opts):
804 def incoming(ui, repo, source, opts):
804 def subreporecurse():
805 def subreporecurse():
805 ret = 1
806 ret = 1
806 if opts.get('subrepos'):
807 if opts.get('subrepos'):
807 ctx = repo[None]
808 ctx = repo[None]
808 for subpath in sorted(ctx.substate):
809 for subpath in sorted(ctx.substate):
809 sub = ctx.sub(subpath)
810 sub = ctx.sub(subpath)
810 ret = min(ret, sub.incoming(ui, source, opts))
811 ret = min(ret, sub.incoming(ui, source, opts))
811 return ret
812 return ret
812
813
813 def display(other, chlist, displayer):
814 def display(other, chlist, displayer):
814 limit = cmdutil.loglimit(opts)
815 limit = cmdutil.loglimit(opts)
815 if opts.get('newest_first'):
816 if opts.get('newest_first'):
816 chlist.reverse()
817 chlist.reverse()
817 count = 0
818 count = 0
818 for n in chlist:
819 for n in chlist:
819 if limit is not None and count >= limit:
820 if limit is not None and count >= limit:
820 break
821 break
821 parents = [p for p in other.changelog.parents(n) if p != nullid]
822 parents = [p for p in other.changelog.parents(n) if p != nullid]
822 if opts.get('no_merges') and len(parents) == 2:
823 if opts.get('no_merges') and len(parents) == 2:
823 continue
824 continue
824 count += 1
825 count += 1
825 displayer.show(other[n])
826 displayer.show(other[n])
826 return _incoming(display, subreporecurse, ui, repo, source, opts)
827 return _incoming(display, subreporecurse, ui, repo, source, opts)
827
828
828 def _outgoing(ui, repo, dest, opts):
829 def _outgoing(ui, repo, dest, opts):
829 dest = ui.expandpath(dest or 'default-push', dest or 'default')
830 dest = ui.expandpath(dest or 'default-push', dest or 'default')
830 dest, branches = parseurl(dest, opts.get('branch'))
831 dest, branches = parseurl(dest, opts.get('branch'))
831 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
832 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
832 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
833 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
833 if revs:
834 if revs:
834 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
835 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
835
836
836 other = peer(repo, opts, dest)
837 other = peer(repo, opts, dest)
837 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
838 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
838 force=opts.get('force'))
839 force=opts.get('force'))
839 o = outgoing.missing
840 o = outgoing.missing
840 if not o:
841 if not o:
841 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
842 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
842 return o, other
843 return o, other
843
844
844 def outgoing(ui, repo, dest, opts):
845 def outgoing(ui, repo, dest, opts):
845 def recurse():
846 def recurse():
846 ret = 1
847 ret = 1
847 if opts.get('subrepos'):
848 if opts.get('subrepos'):
848 ctx = repo[None]
849 ctx = repo[None]
849 for subpath in sorted(ctx.substate):
850 for subpath in sorted(ctx.substate):
850 sub = ctx.sub(subpath)
851 sub = ctx.sub(subpath)
851 ret = min(ret, sub.outgoing(ui, dest, opts))
852 ret = min(ret, sub.outgoing(ui, dest, opts))
852 return ret
853 return ret
853
854
854 limit = cmdutil.loglimit(opts)
855 limit = cmdutil.loglimit(opts)
855 o, other = _outgoing(ui, repo, dest, opts)
856 o, other = _outgoing(ui, repo, dest, opts)
856 if not o:
857 if not o:
857 cmdutil.outgoinghooks(ui, repo, other, opts, o)
858 cmdutil.outgoinghooks(ui, repo, other, opts, o)
858 return recurse()
859 return recurse()
859
860
860 if opts.get('newest_first'):
861 if opts.get('newest_first'):
861 o.reverse()
862 o.reverse()
862 displayer = cmdutil.show_changeset(ui, repo, opts)
863 displayer = cmdutil.show_changeset(ui, repo, opts)
863 count = 0
864 count = 0
864 for n in o:
865 for n in o:
865 if limit is not None and count >= limit:
866 if limit is not None and count >= limit:
866 break
867 break
867 parents = [p for p in repo.changelog.parents(n) if p != nullid]
868 parents = [p for p in repo.changelog.parents(n) if p != nullid]
868 if opts.get('no_merges') and len(parents) == 2:
869 if opts.get('no_merges') and len(parents) == 2:
869 continue
870 continue
870 count += 1
871 count += 1
871 displayer.show(repo[n])
872 displayer.show(repo[n])
872 displayer.close()
873 displayer.close()
873 cmdutil.outgoinghooks(ui, repo, other, opts, o)
874 cmdutil.outgoinghooks(ui, repo, other, opts, o)
874 recurse()
875 recurse()
875 return 0 # exit code is zero since we found outgoing changes
876 return 0 # exit code is zero since we found outgoing changes
876
877
877 def verify(repo):
878 def verify(repo):
878 """verify the consistency of a repository"""
879 """verify the consistency of a repository"""
879 ret = verifymod.verify(repo)
880 ret = verifymod.verify(repo)
880
881
881 # Broken subrepo references in hidden csets don't seem worth worrying about,
882 # Broken subrepo references in hidden csets don't seem worth worrying about,
882 # since they can't be pushed/pulled, and --hidden can be used if they are a
883 # since they can't be pushed/pulled, and --hidden can be used if they are a
883 # concern.
884 # concern.
884
885
885 # pathto() is needed for -R case
886 # pathto() is needed for -R case
886 revs = repo.revs("filelog(%s)",
887 revs = repo.revs("filelog(%s)",
887 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
888 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
888
889
889 if revs:
890 if revs:
890 repo.ui.status(_('checking subrepo links\n'))
891 repo.ui.status(_('checking subrepo links\n'))
891 for rev in revs:
892 for rev in revs:
892 ctx = repo[rev]
893 ctx = repo[rev]
893 try:
894 try:
894 for subpath in ctx.substate:
895 for subpath in ctx.substate:
895 try:
896 try:
896 ret = (ctx.sub(subpath, allowcreate=False).verify()
897 ret = (ctx.sub(subpath, allowcreate=False).verify()
897 or ret)
898 or ret)
898 except error.RepoError as e:
899 except error.RepoError as e:
899 repo.ui.warn(('%s: %s\n') % (rev, e))
900 repo.ui.warn(('%s: %s\n') % (rev, e))
900 except Exception:
901 except Exception:
901 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
902 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
902 node.short(ctx.node()))
903 node.short(ctx.node()))
903
904
904 return ret
905 return ret
905
906
906 def remoteui(src, opts):
907 def remoteui(src, opts):
907 'build a remote ui from ui or repo and opts'
908 'build a remote ui from ui or repo and opts'
908 if util.safehasattr(src, 'baseui'): # looks like a repository
909 if util.safehasattr(src, 'baseui'): # looks like a repository
909 dst = src.baseui.copy() # drop repo-specific config
910 dst = src.baseui.copy() # drop repo-specific config
910 src = src.ui # copy target options from repo
911 src = src.ui # copy target options from repo
911 else: # assume it's a global ui object
912 else: # assume it's a global ui object
912 dst = src.copy() # keep all global options
913 dst = src.copy() # keep all global options
913
914
914 # copy ssh-specific options
915 # copy ssh-specific options
915 for o in 'ssh', 'remotecmd':
916 for o in 'ssh', 'remotecmd':
916 v = opts.get(o) or src.config('ui', o)
917 v = opts.get(o) or src.config('ui', o)
917 if v:
918 if v:
918 dst.setconfig("ui", o, v, 'copied')
919 dst.setconfig("ui", o, v, 'copied')
919
920
920 # copy bundle-specific options
921 # copy bundle-specific options
921 r = src.config('bundle', 'mainreporoot')
922 r = src.config('bundle', 'mainreporoot')
922 if r:
923 if r:
923 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
924 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
924
925
925 # copy selected local settings to the remote ui
926 # copy selected local settings to the remote ui
926 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
927 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
927 for key, val in src.configitems(sect):
928 for key, val in src.configitems(sect):
928 dst.setconfig(sect, key, val, 'copied')
929 dst.setconfig(sect, key, val, 'copied')
929 v = src.config('web', 'cacerts')
930 v = src.config('web', 'cacerts')
930 if v:
931 if v:
931 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
932 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
932
933
933 return dst
934 return dst
934
935
935 # Files of interest
936 # Files of interest
936 # Used to check if the repository has changed looking at mtime and size of
937 # Used to check if the repository has changed looking at mtime and size of
937 # these files.
938 # these files.
938 foi = [('spath', '00changelog.i'),
939 foi = [('spath', '00changelog.i'),
939 ('spath', 'phaseroots'), # ! phase can change content at the same size
940 ('spath', 'phaseroots'), # ! phase can change content at the same size
940 ('spath', 'obsstore'),
941 ('spath', 'obsstore'),
941 ('path', 'bookmarks'), # ! bookmark can change content at the same size
942 ('path', 'bookmarks'), # ! bookmark can change content at the same size
942 ]
943 ]
943
944
944 class cachedlocalrepo(object):
945 class cachedlocalrepo(object):
945 """Holds a localrepository that can be cached and reused."""
946 """Holds a localrepository that can be cached and reused."""
946
947
947 def __init__(self, repo):
948 def __init__(self, repo):
948 """Create a new cached repo from an existing repo.
949 """Create a new cached repo from an existing repo.
949
950
950 We assume the passed in repo was recently created. If the
951 We assume the passed in repo was recently created. If the
951 repo has changed between when it was created and when it was
952 repo has changed between when it was created and when it was
952 turned into a cache, it may not refresh properly.
953 turned into a cache, it may not refresh properly.
953 """
954 """
954 assert isinstance(repo, localrepo.localrepository)
955 assert isinstance(repo, localrepo.localrepository)
955 self._repo = repo
956 self._repo = repo
956 self._state, self.mtime = self._repostate()
957 self._state, self.mtime = self._repostate()
957 self._filtername = repo.filtername
958 self._filtername = repo.filtername
958
959
959 def fetch(self):
960 def fetch(self):
960 """Refresh (if necessary) and return a repository.
961 """Refresh (if necessary) and return a repository.
961
962
962 If the cached instance is out of date, it will be recreated
963 If the cached instance is out of date, it will be recreated
963 automatically and returned.
964 automatically and returned.
964
965
965 Returns a tuple of the repo and a boolean indicating whether a new
966 Returns a tuple of the repo and a boolean indicating whether a new
966 repo instance was created.
967 repo instance was created.
967 """
968 """
968 # We compare the mtimes and sizes of some well-known files to
969 # We compare the mtimes and sizes of some well-known files to
969 # determine if the repo changed. This is not precise, as mtimes
970 # determine if the repo changed. This is not precise, as mtimes
970 # are susceptible to clock skew and imprecise filesystems and
971 # are susceptible to clock skew and imprecise filesystems and
971 # file content can change while maintaining the same size.
972 # file content can change while maintaining the same size.
972
973
973 state, mtime = self._repostate()
974 state, mtime = self._repostate()
974 if state == self._state:
975 if state == self._state:
975 return self._repo, False
976 return self._repo, False
976
977
977 repo = repository(self._repo.baseui, self._repo.url())
978 repo = repository(self._repo.baseui, self._repo.url())
978 if self._filtername:
979 if self._filtername:
979 self._repo = repo.filtered(self._filtername)
980 self._repo = repo.filtered(self._filtername)
980 else:
981 else:
981 self._repo = repo.unfiltered()
982 self._repo = repo.unfiltered()
982 self._state = state
983 self._state = state
983 self.mtime = mtime
984 self.mtime = mtime
984
985
985 return self._repo, True
986 return self._repo, True
986
987
987 def _repostate(self):
988 def _repostate(self):
988 state = []
989 state = []
989 maxmtime = -1
990 maxmtime = -1
990 for attr, fname in foi:
991 for attr, fname in foi:
991 prefix = getattr(self._repo, attr)
992 prefix = getattr(self._repo, attr)
992 p = os.path.join(prefix, fname)
993 p = os.path.join(prefix, fname)
993 try:
994 try:
994 st = os.stat(p)
995 st = os.stat(p)
995 except OSError:
996 except OSError:
996 st = os.stat(prefix)
997 st = os.stat(prefix)
997 state.append((st.st_mtime, st.st_size))
998 state.append((st.st_mtime, st.st_size))
998 maxmtime = max(maxmtime, st.st_mtime)
999 maxmtime = max(maxmtime, st.st_mtime)
999
1000
1000 return tuple(state), maxmtime
1001 return tuple(state), maxmtime
1001
1002
1002 def copy(self):
1003 def copy(self):
1003 """Obtain a copy of this class instance.
1004 """Obtain a copy of this class instance.
1004
1005
1005 A new localrepository instance is obtained. The new instance should be
1006 A new localrepository instance is obtained. The new instance should be
1006 completely independent of the original.
1007 completely independent of the original.
1007 """
1008 """
1008 repo = repository(self._repo.baseui, self._repo.origroot)
1009 repo = repository(self._repo.baseui, self._repo.origroot)
1009 if self._filtername:
1010 if self._filtername:
1010 repo = repo.filtered(self._filtername)
1011 repo = repo.filtered(self._filtername)
1011 else:
1012 else:
1012 repo = repo.unfiltered()
1013 repo = repo.unfiltered()
1013 c = cachedlocalrepo(repo)
1014 c = cachedlocalrepo(repo)
1014 c._state = self._state
1015 c._state = self._state
1015 c.mtime = self.mtime
1016 c.mtime = self.mtime
1016 return c
1017 return c
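For context, a minimal sketch of how this code path is reached through the API defined in this file; the repository paths are hypothetical and the ui construction assumes the API of this era:

    from mercurial import hg, ui as uimod

    u = uimod.ui()
    # share the repo at 'src' into 'dst'; share() calls postshare(),
    # which now takes destrepo.wlock() around the '.hg/shared' write
    hg.share(u, 'src', dest='dst', update=False, bookmarks=True)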