doc: omit useless _() invocation...
FUJIWARA Katsunori
r29645:3b4d69b3 stable
@@ -1,1016 +1,1016 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 )
43 )
44
44
45 release = lock.release
45 release = lock.release
46
46
47 # shared features
47 # shared features
48 sharedbookmarks = 'bookmarks'
48 sharedbookmarks = 'bookmarks'
49
49
50 def _local(path):
50 def _local(path):
51 path = util.expandpath(util.urllocalpath(path))
51 path = util.expandpath(util.urllocalpath(path))
52 return (os.path.isfile(path) and bundlerepo or localrepo)
52 return (os.path.isfile(path) and bundlerepo or localrepo)
53
53
54 def addbranchrevs(lrepo, other, branches, revs):
54 def addbranchrevs(lrepo, other, branches, revs):
55 peer = other.peer() # a courtesy to callers using a localrepo for other
55 peer = other.peer() # a courtesy to callers using a localrepo for other
56 hashbranch, branches = branches
56 hashbranch, branches = branches
57 if not hashbranch and not branches:
57 if not hashbranch and not branches:
58 x = revs or None
58 x = revs or None
59 if util.safehasattr(revs, 'first'):
59 if util.safehasattr(revs, 'first'):
60 y = revs.first()
60 y = revs.first()
61 elif revs:
61 elif revs:
62 y = revs[0]
62 y = revs[0]
63 else:
63 else:
64 y = None
64 y = None
65 return x, y
65 return x, y
66 if revs:
66 if revs:
67 revs = list(revs)
67 revs = list(revs)
68 else:
68 else:
69 revs = []
69 revs = []
70
70
71 if not peer.capable('branchmap'):
71 if not peer.capable('branchmap'):
72 if branches:
72 if branches:
73 raise error.Abort(_("remote branch lookup not supported"))
73 raise error.Abort(_("remote branch lookup not supported"))
74 revs.append(hashbranch)
74 revs.append(hashbranch)
75 return revs, revs[0]
75 return revs, revs[0]
76 branchmap = peer.branchmap()
76 branchmap = peer.branchmap()
77
77
78 def primary(branch):
78 def primary(branch):
79 if branch == '.':
79 if branch == '.':
80 if not lrepo:
80 if not lrepo:
81 raise error.Abort(_("dirstate branch not accessible"))
81 raise error.Abort(_("dirstate branch not accessible"))
82 branch = lrepo.dirstate.branch()
82 branch = lrepo.dirstate.branch()
83 if branch in branchmap:
83 if branch in branchmap:
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
85 return True
85 return True
86 else:
86 else:
87 return False
87 return False
88
88
89 for branch in branches:
89 for branch in branches:
90 if not primary(branch):
90 if not primary(branch):
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
92 if hashbranch:
92 if hashbranch:
93 if not primary(hashbranch):
93 if not primary(hashbranch):
94 revs.append(hashbranch)
94 revs.append(hashbranch)
95 return revs, revs[0]
95 return revs, revs[0]
96
96
97 def parseurl(path, branches=None):
97 def parseurl(path, branches=None):
98 '''parse url#branch, returning (url, (branch, branches))'''
98 '''parse url#branch, returning (url, (branch, branches))'''
99
99
100 u = util.url(path)
100 u = util.url(path)
101 branch = None
101 branch = None
102 if u.fragment:
102 if u.fragment:
103 branch = u.fragment
103 branch = u.fragment
104 u.fragment = None
104 u.fragment = None
105 return str(u), (branch, branches or [])
105 return str(u), (branch, branches or [])
106
106
107 schemes = {
107 schemes = {
108 'bundle': bundlerepo,
108 'bundle': bundlerepo,
109 'union': unionrepo,
109 'union': unionrepo,
110 'file': _local,
110 'file': _local,
111 'http': httppeer,
111 'http': httppeer,
112 'https': httppeer,
112 'https': httppeer,
113 'ssh': sshpeer,
113 'ssh': sshpeer,
114 'static-http': statichttprepo,
114 'static-http': statichttprepo,
115 }
115 }
116
116
117 def _peerlookup(path):
117 def _peerlookup(path):
118 u = util.url(path)
118 u = util.url(path)
119 scheme = u.scheme or 'file'
119 scheme = u.scheme or 'file'
120 thing = schemes.get(scheme) or schemes['file']
120 thing = schemes.get(scheme) or schemes['file']
121 try:
121 try:
122 return thing(path)
122 return thing(path)
123 except TypeError:
123 except TypeError:
124 # we can't test callable(thing) because 'thing' can be an unloaded
124 # we can't test callable(thing) because 'thing' can be an unloaded
125 # module that implements __call__
125 # module that implements __call__
126 if not util.safehasattr(thing, 'instance'):
126 if not util.safehasattr(thing, 'instance'):
127 raise
127 raise
128 return thing
128 return thing
129
129
130 def islocal(repo):
130 def islocal(repo):
131 '''return true if repo (or path pointing to repo) is local'''
131 '''return true if repo (or path pointing to repo) is local'''
132 if isinstance(repo, str):
132 if isinstance(repo, str):
133 try:
133 try:
134 return _peerlookup(repo).islocal(repo)
134 return _peerlookup(repo).islocal(repo)
135 except AttributeError:
135 except AttributeError:
136 return False
136 return False
137 return repo.local()
137 return repo.local()
138
138
139 def openpath(ui, path):
139 def openpath(ui, path):
140 '''open path with open if local, url.open if remote'''
140 '''open path with open if local, url.open if remote'''
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
142 if pathurl.islocal():
142 if pathurl.islocal():
143 return util.posixfile(pathurl.localpath(), 'rb')
143 return util.posixfile(pathurl.localpath(), 'rb')
144 else:
144 else:
145 return url.open(ui, path)
145 return url.open(ui, path)
146
146
147 # a list of (ui, repo) functions called for wire peer initialization
147 # a list of (ui, repo) functions called for wire peer initialization
148 wirepeersetupfuncs = []
148 wirepeersetupfuncs = []
149
149
150 def _peerorrepo(ui, path, create=False):
150 def _peerorrepo(ui, path, create=False):
151 """return a repository object for the specified path"""
151 """return a repository object for the specified path"""
152 obj = _peerlookup(path).instance(ui, path, create)
152 obj = _peerlookup(path).instance(ui, path, create)
153 ui = getattr(obj, "ui", ui)
153 ui = getattr(obj, "ui", ui)
154 for name, module in extensions.extensions(ui):
154 for name, module in extensions.extensions(ui):
155 hook = getattr(module, 'reposetup', None)
155 hook = getattr(module, 'reposetup', None)
156 if hook:
156 if hook:
157 hook(ui, obj)
157 hook(ui, obj)
158 if not obj.local():
158 if not obj.local():
159 for f in wirepeersetupfuncs:
159 for f in wirepeersetupfuncs:
160 f(ui, obj)
160 f(ui, obj)
161 return obj
161 return obj
162
162
163 def repository(ui, path='', create=False):
163 def repository(ui, path='', create=False):
164 """return a repository object for the specified path"""
164 """return a repository object for the specified path"""
165 peer = _peerorrepo(ui, path, create)
165 peer = _peerorrepo(ui, path, create)
166 repo = peer.local()
166 repo = peer.local()
167 if not repo:
167 if not repo:
168 raise error.Abort(_("repository '%s' is not local") %
168 raise error.Abort(_("repository '%s' is not local") %
169 (path or peer.url()))
169 (path or peer.url()))
170 return repo.filtered('visible')
170 return repo.filtered('visible')
171
171
172 def peer(uiorrepo, opts, path, create=False):
172 def peer(uiorrepo, opts, path, create=False):
173 '''return a repository peer for the specified path'''
173 '''return a repository peer for the specified path'''
174 rui = remoteui(uiorrepo, opts)
174 rui = remoteui(uiorrepo, opts)
175 return _peerorrepo(rui, path, create).peer()
175 return _peerorrepo(rui, path, create).peer()
176
176
177 def defaultdest(source):
177 def defaultdest(source):
178 '''return default destination of clone if none is given
178 '''return default destination of clone if none is given
179
179
180 >>> defaultdest('foo')
180 >>> defaultdest('foo')
181 'foo'
181 'foo'
182 >>> defaultdest('/foo/bar')
182 >>> defaultdest('/foo/bar')
183 'bar'
183 'bar'
184 >>> defaultdest('/')
184 >>> defaultdest('/')
185 ''
185 ''
186 >>> defaultdest('')
186 >>> defaultdest('')
187 ''
187 ''
188 >>> defaultdest('http://example.org/')
188 >>> defaultdest('http://example.org/')
189 ''
189 ''
190 >>> defaultdest('http://example.org/foo/')
190 >>> defaultdest('http://example.org/foo/')
191 'foo'
191 'foo'
192 '''
192 '''
193 path = util.url(source).path
193 path = util.url(source).path
194 if not path:
194 if not path:
195 return ''
195 return ''
196 return os.path.basename(os.path.normpath(path))
196 return os.path.basename(os.path.normpath(path))
197
197
198 def share(ui, source, dest=None, update=True, bookmarks=True):
198 def share(ui, source, dest=None, update=True, bookmarks=True):
199 '''create a shared repository'''
199 '''create a shared repository'''
200
200
201 if not islocal(source):
201 if not islocal(source):
202 raise error.Abort(_('can only share local repositories'))
202 raise error.Abort(_('can only share local repositories'))
203
203
204 if not dest:
204 if not dest:
205 dest = defaultdest(source)
205 dest = defaultdest(source)
206 else:
206 else:
207 dest = ui.expandpath(dest)
207 dest = ui.expandpath(dest)
208
208
209 if isinstance(source, str):
209 if isinstance(source, str):
210 origsource = ui.expandpath(source)
210 origsource = ui.expandpath(source)
211 source, branches = parseurl(origsource)
211 source, branches = parseurl(origsource)
212 srcrepo = repository(ui, source)
212 srcrepo = repository(ui, source)
213 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
213 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
214 else:
214 else:
215 srcrepo = source.local()
215 srcrepo = source.local()
216 origsource = source = srcrepo.url()
216 origsource = source = srcrepo.url()
217 checkout = None
217 checkout = None
218
218
219 sharedpath = srcrepo.sharedpath # if our source is already sharing
219 sharedpath = srcrepo.sharedpath # if our source is already sharing
220
220
221 destwvfs = scmutil.vfs(dest, realpath=True)
221 destwvfs = scmutil.vfs(dest, realpath=True)
222 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
222 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
223
223
224 if destvfs.lexists():
224 if destvfs.lexists():
225 raise error.Abort(_('destination already exists'))
225 raise error.Abort(_('destination already exists'))
226
226
227 if not destwvfs.isdir():
227 if not destwvfs.isdir():
228 destwvfs.mkdir()
228 destwvfs.mkdir()
229 destvfs.makedir()
229 destvfs.makedir()
230
230
231 requirements = ''
231 requirements = ''
232 try:
232 try:
233 requirements = srcrepo.vfs.read('requires')
233 requirements = srcrepo.vfs.read('requires')
234 except IOError as inst:
234 except IOError as inst:
235 if inst.errno != errno.ENOENT:
235 if inst.errno != errno.ENOENT:
236 raise
236 raise
237
237
238 requirements += 'shared\n'
238 requirements += 'shared\n'
239 destvfs.write('requires', requirements)
239 destvfs.write('requires', requirements)
240 destvfs.write('sharedpath', sharedpath)
240 destvfs.write('sharedpath', sharedpath)
241
241
242 r = repository(ui, destwvfs.base)
242 r = repository(ui, destwvfs.base)
243 postshare(srcrepo, r, bookmarks=bookmarks)
243 postshare(srcrepo, r, bookmarks=bookmarks)
244 _postshareupdate(r, update, checkout=checkout)
244 _postshareupdate(r, update, checkout=checkout)
245
245
246 def postshare(sourcerepo, destrepo, bookmarks=True):
246 def postshare(sourcerepo, destrepo, bookmarks=True):
247 """Called after a new shared repo is created.
247 """Called after a new shared repo is created.
248
248
249 The new repo only has a requirements file and pointer to the source.
249 The new repo only has a requirements file and pointer to the source.
250 This function configures additional shared data.
250 This function configures additional shared data.
251
251
252 Extensions can wrap this function and write additional entries to
252 Extensions can wrap this function and write additional entries to
253 destrepo/.hg/shared to indicate additional pieces of data to be shared.
253 destrepo/.hg/shared to indicate additional pieces of data to be shared.
254 """
254 """
255 default = sourcerepo.ui.config('paths', 'default')
255 default = sourcerepo.ui.config('paths', 'default')
256 if default:
256 if default:
257 fp = destrepo.vfs("hgrc", "w", text=True)
257 fp = destrepo.vfs("hgrc", "w", text=True)
258 fp.write("[paths]\n")
258 fp.write("[paths]\n")
259 fp.write("default = %s\n" % default)
259 fp.write("default = %s\n" % default)
260 fp.close()
260 fp.close()
261
261
262 if bookmarks:
262 if bookmarks:
263 fp = destrepo.vfs('shared', 'w')
263 fp = destrepo.vfs('shared', 'w')
264 fp.write(sharedbookmarks + '\n')
264 fp.write(sharedbookmarks + '\n')
265 fp.close()
265 fp.close()
266
266
267 def _postshareupdate(repo, update, checkout=None):
267 def _postshareupdate(repo, update, checkout=None):
268 """Maybe perform a working directory update after a shared repo is created.
268 """Maybe perform a working directory update after a shared repo is created.
269
269
270 ``update`` can be a boolean or a revision to update to.
270 ``update`` can be a boolean or a revision to update to.
271 """
271 """
272 if not update:
272 if not update:
273 return
273 return
274
274
275 repo.ui.status(_("updating working directory\n"))
275 repo.ui.status(_("updating working directory\n"))
276 if update is not True:
276 if update is not True:
277 checkout = update
277 checkout = update
278 for test in (checkout, 'default', 'tip'):
278 for test in (checkout, 'default', 'tip'):
279 if test is None:
279 if test is None:
280 continue
280 continue
281 try:
281 try:
282 uprev = repo.lookup(test)
282 uprev = repo.lookup(test)
283 break
283 break
284 except error.RepoLookupError:
284 except error.RepoLookupError:
285 continue
285 continue
286 _update(repo, uprev)
286 _update(repo, uprev)
287
287
288 def copystore(ui, srcrepo, destpath):
288 def copystore(ui, srcrepo, destpath):
289 '''copy files from store of srcrepo in destpath
289 '''copy files from store of srcrepo in destpath
290
290
291 returns destlock
291 returns destlock
292 '''
292 '''
293 destlock = None
293 destlock = None
294 try:
294 try:
295 hardlink = None
295 hardlink = None
296 num = 0
296 num = 0
297 closetopic = [None]
297 closetopic = [None]
298 def prog(topic, pos):
298 def prog(topic, pos):
299 if pos is None:
299 if pos is None:
300 closetopic[0] = topic
300 closetopic[0] = topic
301 else:
301 else:
302 ui.progress(topic, pos + num)
302 ui.progress(topic, pos + num)
303 srcpublishing = srcrepo.publishing()
303 srcpublishing = srcrepo.publishing()
304 srcvfs = scmutil.vfs(srcrepo.sharedpath)
304 srcvfs = scmutil.vfs(srcrepo.sharedpath)
305 dstvfs = scmutil.vfs(destpath)
305 dstvfs = scmutil.vfs(destpath)
306 for f in srcrepo.store.copylist():
306 for f in srcrepo.store.copylist():
307 if srcpublishing and f.endswith('phaseroots'):
307 if srcpublishing and f.endswith('phaseroots'):
308 continue
308 continue
309 dstbase = os.path.dirname(f)
309 dstbase = os.path.dirname(f)
310 if dstbase and not dstvfs.exists(dstbase):
310 if dstbase and not dstvfs.exists(dstbase):
311 dstvfs.mkdir(dstbase)
311 dstvfs.mkdir(dstbase)
312 if srcvfs.exists(f):
312 if srcvfs.exists(f):
313 if f.endswith('data'):
313 if f.endswith('data'):
314 # 'dstbase' may be empty (e.g. revlog format 0)
314 # 'dstbase' may be empty (e.g. revlog format 0)
315 lockfile = os.path.join(dstbase, "lock")
315 lockfile = os.path.join(dstbase, "lock")
316 # lock to avoid premature writing to the target
316 # lock to avoid premature writing to the target
317 destlock = lock.lock(dstvfs, lockfile)
317 destlock = lock.lock(dstvfs, lockfile)
318 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
318 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
319 hardlink, progress=prog)
319 hardlink, progress=prog)
320 num += n
320 num += n
321 if hardlink:
321 if hardlink:
322 ui.debug("linked %d files\n" % num)
322 ui.debug("linked %d files\n" % num)
323 if closetopic[0]:
323 if closetopic[0]:
324 ui.progress(closetopic[0], None)
324 ui.progress(closetopic[0], None)
325 else:
325 else:
326 ui.debug("copied %d files\n" % num)
326 ui.debug("copied %d files\n" % num)
327 if closetopic[0]:
327 if closetopic[0]:
328 ui.progress(closetopic[0], None)
328 ui.progress(closetopic[0], None)
329 return destlock
329 return destlock
330 except: # re-raises
330 except: # re-raises
331 release(destlock)
331 release(destlock)
332 raise
332 raise
333
333
334 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
334 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
335 rev=None, update=True, stream=False):
335 rev=None, update=True, stream=False):
336 """Perform a clone using a shared repo.
336 """Perform a clone using a shared repo.
337
337
338 The store for the repository will be located at <sharepath>/.hg. The
338 The store for the repository will be located at <sharepath>/.hg. The
339 specified revisions will be cloned or pulled from "source". A shared repo
339 specified revisions will be cloned or pulled from "source". A shared repo
340 will be created at "dest" and a working copy will be created if "update" is
340 will be created at "dest" and a working copy will be created if "update" is
341 True.
341 True.
342 """
342 """
343 revs = None
343 revs = None
344 if rev:
344 if rev:
345 if not srcpeer.capable('lookup'):
345 if not srcpeer.capable('lookup'):
346 raise error.Abort(_("src repository does not support "
346 raise error.Abort(_("src repository does not support "
347 "revision lookup and so doesn't "
347 "revision lookup and so doesn't "
348 "support clone by revision"))
348 "support clone by revision"))
349 revs = [srcpeer.lookup(r) for r in rev]
349 revs = [srcpeer.lookup(r) for r in rev]
350
350
351 # Obtain a lock before checking for or cloning the pooled repo otherwise
351 # Obtain a lock before checking for or cloning the pooled repo otherwise
352 # 2 clients may race creating or populating it.
352 # 2 clients may race creating or populating it.
353 pooldir = os.path.dirname(sharepath)
353 pooldir = os.path.dirname(sharepath)
354 # lock class requires the directory to exist.
354 # lock class requires the directory to exist.
355 try:
355 try:
356 util.makedir(pooldir, False)
356 util.makedir(pooldir, False)
357 except OSError as e:
357 except OSError as e:
358 if e.errno != errno.EEXIST:
358 if e.errno != errno.EEXIST:
359 raise
359 raise
360
360
361 poolvfs = scmutil.vfs(pooldir)
361 poolvfs = scmutil.vfs(pooldir)
362 basename = os.path.basename(sharepath)
362 basename = os.path.basename(sharepath)
363
363
364 with lock.lock(poolvfs, '%s.lock' % basename):
364 with lock.lock(poolvfs, '%s.lock' % basename):
365 if os.path.exists(sharepath):
365 if os.path.exists(sharepath):
366 ui.status(_('(sharing from existing pooled repository %s)\n') %
366 ui.status(_('(sharing from existing pooled repository %s)\n') %
367 basename)
367 basename)
368 else:
368 else:
369 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
369 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
370 # Always use pull mode because hardlinks in share mode don't work
370 # Always use pull mode because hardlinks in share mode don't work
371 # well. Never update because working copies aren't necessary in
371 # well. Never update because working copies aren't necessary in
372 # share mode.
372 # share mode.
373 clone(ui, peeropts, source, dest=sharepath, pull=True,
373 clone(ui, peeropts, source, dest=sharepath, pull=True,
374 rev=rev, update=False, stream=stream)
374 rev=rev, update=False, stream=stream)
375
375
376 sharerepo = repository(ui, path=sharepath)
376 sharerepo = repository(ui, path=sharepath)
377 share(ui, sharerepo, dest=dest, update=False, bookmarks=False)
377 share(ui, sharerepo, dest=dest, update=False, bookmarks=False)
378
378
379 # We need to perform a pull against the dest repo to fetch bookmarks
379 # We need to perform a pull against the dest repo to fetch bookmarks
380 # and other non-store data that isn't shared by default. In the case of
380 # and other non-store data that isn't shared by default. In the case of
381 # non-existing shared repo, this means we pull from the remote twice. This
381 # non-existing shared repo, this means we pull from the remote twice. This
382 # is a bit weird. But at the time it was implemented, there wasn't an easy
382 # is a bit weird. But at the time it was implemented, there wasn't an easy
383 # way to pull just non-changegroup data.
383 # way to pull just non-changegroup data.
384 destrepo = repository(ui, path=dest)
384 destrepo = repository(ui, path=dest)
385 exchange.pull(destrepo, srcpeer, heads=revs)
385 exchange.pull(destrepo, srcpeer, heads=revs)
386
386
387 _postshareupdate(destrepo, update)
387 _postshareupdate(destrepo, update)
388
388
389 return srcpeer, peer(ui, peeropts, dest)
389 return srcpeer, peer(ui, peeropts, dest)
390
390
391 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
391 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
392 update=True, stream=False, branch=None, shareopts=None):
392 update=True, stream=False, branch=None, shareopts=None):
393 """Make a copy of an existing repository.
393 """Make a copy of an existing repository.
394
394
395 Create a copy of an existing repository in a new directory. The
395 Create a copy of an existing repository in a new directory. The
396 source and destination are URLs, as passed to the repository
396 source and destination are URLs, as passed to the repository
397 function. Returns a pair of repository peers, the source and
397 function. Returns a pair of repository peers, the source and
398 newly created destination.
398 newly created destination.
399
399
400 The location of the source is added to the new repository's
400 The location of the source is added to the new repository's
401 .hg/hgrc file, as the default to be used for future pulls and
401 .hg/hgrc file, as the default to be used for future pulls and
402 pushes.
402 pushes.
403
403
404 If an exception is raised, the partly cloned/updated destination
404 If an exception is raised, the partly cloned/updated destination
405 repository will be deleted.
405 repository will be deleted.
406
406
407 Arguments:
407 Arguments:
408
408
409 source: repository object or URL
409 source: repository object or URL
410
410
411 dest: URL of destination repository to create (defaults to base
411 dest: URL of destination repository to create (defaults to base
412 name of source repository)
412 name of source repository)
413
413
414 pull: always pull from source repository, even in local case or if the
414 pull: always pull from source repository, even in local case or if the
415 server prefers streaming
415 server prefers streaming
416
416
417 stream: stream raw data uncompressed from repository (fast over
417 stream: stream raw data uncompressed from repository (fast over
418 LAN, slow over WAN)
418 LAN, slow over WAN)
419
419
420 rev: revision to clone up to (implies pull=True)
420 rev: revision to clone up to (implies pull=True)
421
421
422 update: update working directory after clone completes, if
422 update: update working directory after clone completes, if
423 destination is local repository (True means update to default rev,
423 destination is local repository (True means update to default rev,
424 anything else is treated as a revision)
424 anything else is treated as a revision)
425
425
426 branch: branches to clone
426 branch: branches to clone
427
427
428 shareopts: dict of options to control auto sharing behavior. The "pool" key
428 shareopts: dict of options to control auto sharing behavior. The "pool" key
429 activates auto sharing mode and defines the directory for stores. The
429 activates auto sharing mode and defines the directory for stores. The
430 "mode" key determines how to construct the directory name of the shared
430 "mode" key determines how to construct the directory name of the shared
431 repository. "identity" means the name is derived from the node of the first
431 repository. "identity" means the name is derived from the node of the first
432 changeset in the repository. "remote" means the name is derived from the
432 changeset in the repository. "remote" means the name is derived from the
433 remote's path/URL. Defaults to "identity."
433 remote's path/URL. Defaults to "identity."
434 """
434 """
435
435
436 if isinstance(source, str):
436 if isinstance(source, str):
437 origsource = ui.expandpath(source)
437 origsource = ui.expandpath(source)
438 source, branch = parseurl(origsource, branch)
438 source, branch = parseurl(origsource, branch)
439 srcpeer = peer(ui, peeropts, source)
439 srcpeer = peer(ui, peeropts, source)
440 else:
440 else:
441 srcpeer = source.peer() # in case we were called with a localrepo
441 srcpeer = source.peer() # in case we were called with a localrepo
442 branch = (None, branch or [])
442 branch = (None, branch or [])
443 origsource = source = srcpeer.url()
443 origsource = source = srcpeer.url()
444 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
444 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
445
445
446 if dest is None:
446 if dest is None:
447 dest = defaultdest(source)
447 dest = defaultdest(source)
448 if dest:
448 if dest:
449 ui.status(_("destination directory: %s\n") % dest)
449 ui.status(_("destination directory: %s\n") % dest)
450 else:
450 else:
451 dest = ui.expandpath(dest)
451 dest = ui.expandpath(dest)
452
452
453 dest = util.urllocalpath(dest)
453 dest = util.urllocalpath(dest)
454 source = util.urllocalpath(source)
454 source = util.urllocalpath(source)
455
455
456 if not dest:
456 if not dest:
457 raise error.Abort(_("empty destination path is not valid"))
457 raise error.Abort(_("empty destination path is not valid"))
458
458
459 destvfs = scmutil.vfs(dest, expandpath=True)
459 destvfs = scmutil.vfs(dest, expandpath=True)
460 if destvfs.lexists():
460 if destvfs.lexists():
461 if not destvfs.isdir():
461 if not destvfs.isdir():
462 raise error.Abort(_("destination '%s' already exists") % dest)
462 raise error.Abort(_("destination '%s' already exists") % dest)
463 elif destvfs.listdir():
463 elif destvfs.listdir():
464 raise error.Abort(_("destination '%s' is not empty") % dest)
464 raise error.Abort(_("destination '%s' is not empty") % dest)
465
465
466 shareopts = shareopts or {}
466 shareopts = shareopts or {}
467 sharepool = shareopts.get('pool')
467 sharepool = shareopts.get('pool')
468 sharenamemode = shareopts.get('mode')
468 sharenamemode = shareopts.get('mode')
469 if sharepool and islocal(dest):
469 if sharepool and islocal(dest):
470 sharepath = None
470 sharepath = None
471 if sharenamemode == 'identity':
471 if sharenamemode == 'identity':
472 # Resolve the name from the initial changeset in the remote
472 # Resolve the name from the initial changeset in the remote
473 # repository. This returns nullid when the remote is empty. It
473 # repository. This returns nullid when the remote is empty. It
474 # raises RepoLookupError if revision 0 is filtered or otherwise
474 # raises RepoLookupError if revision 0 is filtered or otherwise
475 # not available. If we fail to resolve, sharing is not enabled.
475 # not available. If we fail to resolve, sharing is not enabled.
476 try:
476 try:
477 rootnode = srcpeer.lookup('0')
477 rootnode = srcpeer.lookup('0')
478 if rootnode != node.nullid:
478 if rootnode != node.nullid:
479 sharepath = os.path.join(sharepool, node.hex(rootnode))
479 sharepath = os.path.join(sharepool, node.hex(rootnode))
480 else:
480 else:
481 ui.status(_('(not using pooled storage: '
481 ui.status(_('(not using pooled storage: '
482 'remote appears to be empty)\n'))
482 'remote appears to be empty)\n'))
483 except error.RepoLookupError:
483 except error.RepoLookupError:
484 ui.status(_('(not using pooled storage: '
484 ui.status(_('(not using pooled storage: '
485 'unable to resolve identity of remote)\n'))
485 'unable to resolve identity of remote)\n'))
486 elif sharenamemode == 'remote':
486 elif sharenamemode == 'remote':
487 sharepath = os.path.join(
487 sharepath = os.path.join(
488 sharepool, hashlib.sha1(source).hexdigest())
488 sharepool, hashlib.sha1(source).hexdigest())
489 else:
489 else:
490 raise error.Abort(_('unknown share naming mode: %s') %
490 raise error.Abort(_('unknown share naming mode: %s') %
491 sharenamemode)
491 sharenamemode)
492
492
493 if sharepath:
493 if sharepath:
494 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
494 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
495 dest, pull=pull, rev=rev, update=update,
495 dest, pull=pull, rev=rev, update=update,
496 stream=stream)
496 stream=stream)
497
497
498 srclock = destlock = cleandir = None
498 srclock = destlock = cleandir = None
499 srcrepo = srcpeer.local()
499 srcrepo = srcpeer.local()
500 try:
500 try:
501 abspath = origsource
501 abspath = origsource
502 if islocal(origsource):
502 if islocal(origsource):
503 abspath = os.path.abspath(util.urllocalpath(origsource))
503 abspath = os.path.abspath(util.urllocalpath(origsource))
504
504
505 if islocal(dest):
505 if islocal(dest):
506 cleandir = dest
506 cleandir = dest
507
507
508 copy = False
508 copy = False
509 if (srcrepo and srcrepo.cancopy() and islocal(dest)
509 if (srcrepo and srcrepo.cancopy() and islocal(dest)
510 and not phases.hassecret(srcrepo)):
510 and not phases.hassecret(srcrepo)):
511 copy = not pull and not rev
511 copy = not pull and not rev
512
512
513 if copy:
513 if copy:
514 try:
514 try:
515 # we use a lock here because if we race with commit, we
515 # we use a lock here because if we race with commit, we
516 # can end up with extra data in the cloned revlogs that's
516 # can end up with extra data in the cloned revlogs that's
517 # not pointed to by changesets, thus causing verify to
517 # not pointed to by changesets, thus causing verify to
518 # fail
518 # fail
519 srclock = srcrepo.lock(wait=False)
519 srclock = srcrepo.lock(wait=False)
520 except error.LockError:
520 except error.LockError:
521 copy = False
521 copy = False
522
522
523 if copy:
523 if copy:
524 srcrepo.hook('preoutgoing', throw=True, source='clone')
524 srcrepo.hook('preoutgoing', throw=True, source='clone')
525 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
525 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
526 if not os.path.exists(dest):
526 if not os.path.exists(dest):
527 os.mkdir(dest)
527 os.mkdir(dest)
528 else:
528 else:
529 # only clean up directories we create ourselves
529 # only clean up directories we create ourselves
530 cleandir = hgdir
530 cleandir = hgdir
531 try:
531 try:
532 destpath = hgdir
532 destpath = hgdir
533 util.makedir(destpath, notindexed=True)
533 util.makedir(destpath, notindexed=True)
534 except OSError as inst:
534 except OSError as inst:
535 if inst.errno == errno.EEXIST:
535 if inst.errno == errno.EEXIST:
536 cleandir = None
536 cleandir = None
537 raise error.Abort(_("destination '%s' already exists")
537 raise error.Abort(_("destination '%s' already exists")
538 % dest)
538 % dest)
539 raise
539 raise
540
540
541 destlock = copystore(ui, srcrepo, destpath)
541 destlock = copystore(ui, srcrepo, destpath)
542 # copy bookmarks over
542 # copy bookmarks over
543 srcbookmarks = srcrepo.join('bookmarks')
543 srcbookmarks = srcrepo.join('bookmarks')
544 dstbookmarks = os.path.join(destpath, 'bookmarks')
544 dstbookmarks = os.path.join(destpath, 'bookmarks')
545 if os.path.exists(srcbookmarks):
545 if os.path.exists(srcbookmarks):
546 util.copyfile(srcbookmarks, dstbookmarks)
546 util.copyfile(srcbookmarks, dstbookmarks)
547
547
548 # Recomputing branch cache might be slow on big repos,
548 # Recomputing branch cache might be slow on big repos,
549 # so just copy it
549 # so just copy it
550 def copybranchcache(fname):
550 def copybranchcache(fname):
551 srcbranchcache = srcrepo.join('cache/%s' % fname)
551 srcbranchcache = srcrepo.join('cache/%s' % fname)
552 dstbranchcache = os.path.join(dstcachedir, fname)
552 dstbranchcache = os.path.join(dstcachedir, fname)
553 if os.path.exists(srcbranchcache):
553 if os.path.exists(srcbranchcache):
554 if not os.path.exists(dstcachedir):
554 if not os.path.exists(dstcachedir):
555 os.mkdir(dstcachedir)
555 os.mkdir(dstcachedir)
556 util.copyfile(srcbranchcache, dstbranchcache)
556 util.copyfile(srcbranchcache, dstbranchcache)
557
557
558 dstcachedir = os.path.join(destpath, 'cache')
558 dstcachedir = os.path.join(destpath, 'cache')
559 # In local clones we're copying all nodes, not just served
559 # In local clones we're copying all nodes, not just served
560 # ones. Therefore copy all branch caches over.
560 # ones. Therefore copy all branch caches over.
561 copybranchcache('branch2')
561 copybranchcache('branch2')
562 for cachename in repoview.filtertable:
562 for cachename in repoview.filtertable:
563 copybranchcache('branch2-%s' % cachename)
563 copybranchcache('branch2-%s' % cachename)
564
564
565 # we need to re-init the repo after manually copying the data
565 # we need to re-init the repo after manually copying the data
566 # into it
566 # into it
567 destpeer = peer(srcrepo, peeropts, dest)
567 destpeer = peer(srcrepo, peeropts, dest)
568 srcrepo.hook('outgoing', source='clone',
568 srcrepo.hook('outgoing', source='clone',
569 node=node.hex(node.nullid))
569 node=node.hex(node.nullid))
570 else:
570 else:
571 try:
571 try:
572 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
572 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
573 # only pass ui when no srcrepo
573 # only pass ui when no srcrepo
574 except OSError as inst:
574 except OSError as inst:
575 if inst.errno == errno.EEXIST:
575 if inst.errno == errno.EEXIST:
576 cleandir = None
576 cleandir = None
577 raise error.Abort(_("destination '%s' already exists")
577 raise error.Abort(_("destination '%s' already exists")
578 % dest)
578 % dest)
579 raise
579 raise
580
580
581 revs = None
581 revs = None
582 if rev:
582 if rev:
583 if not srcpeer.capable('lookup'):
583 if not srcpeer.capable('lookup'):
584 raise error.Abort(_("src repository does not support "
584 raise error.Abort(_("src repository does not support "
585 "revision lookup and so doesn't "
585 "revision lookup and so doesn't "
586 "support clone by revision"))
586 "support clone by revision"))
587 revs = [srcpeer.lookup(r) for r in rev]
587 revs = [srcpeer.lookup(r) for r in rev]
588 checkout = revs[0]
588 checkout = revs[0]
589 local = destpeer.local()
589 local = destpeer.local()
590 if local:
590 if local:
591 if not stream:
591 if not stream:
592 if pull:
592 if pull:
593 stream = False
593 stream = False
594 else:
594 else:
595 stream = None
595 stream = None
596 # internal config: ui.quietbookmarkmove
596 # internal config: ui.quietbookmarkmove
597 quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
597 quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
598 try:
598 try:
599 local.ui.setconfig(
599 local.ui.setconfig(
600 'ui', 'quietbookmarkmove', True, 'clone')
600 'ui', 'quietbookmarkmove', True, 'clone')
601 exchange.pull(local, srcpeer, revs,
601 exchange.pull(local, srcpeer, revs,
602 streamclonerequested=stream)
602 streamclonerequested=stream)
603 finally:
603 finally:
604 local.ui.restoreconfig(quiet)
604 local.ui.restoreconfig(quiet)
605 elif srcrepo:
605 elif srcrepo:
606 exchange.push(srcrepo, destpeer, revs=revs,
606 exchange.push(srcrepo, destpeer, revs=revs,
607 bookmarks=srcrepo._bookmarks.keys())
607 bookmarks=srcrepo._bookmarks.keys())
608 else:
608 else:
609 raise error.Abort(_("clone from remote to remote not supported")
609 raise error.Abort(_("clone from remote to remote not supported")
610 )
610 )
611
611
612 cleandir = None
612 cleandir = None
613
613
614 destrepo = destpeer.local()
614 destrepo = destpeer.local()
615 if destrepo:
615 if destrepo:
616 template = uimod.samplehgrcs['cloned']
616 template = uimod.samplehgrcs['cloned']
617 fp = destrepo.vfs("hgrc", "w", text=True)
617 fp = destrepo.vfs("hgrc", "w", text=True)
618 u = util.url(abspath)
618 u = util.url(abspath)
619 u.passwd = None
619 u.passwd = None
620 defaulturl = str(u)
620 defaulturl = str(u)
621 fp.write(template % defaulturl)
621 fp.write(template % defaulturl)
622 fp.close()
622 fp.close()
623
623
624 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
624 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
625
625
626 if update:
626 if update:
627 if update is not True:
627 if update is not True:
628 checkout = srcpeer.lookup(update)
628 checkout = srcpeer.lookup(update)
629 uprev = None
629 uprev = None
630 status = None
630 status = None
631 if checkout is not None:
631 if checkout is not None:
632 try:
632 try:
633 uprev = destrepo.lookup(checkout)
633 uprev = destrepo.lookup(checkout)
634 except error.RepoLookupError:
634 except error.RepoLookupError:
635 if update is not True:
635 if update is not True:
636 try:
636 try:
637 uprev = destrepo.lookup(update)
637 uprev = destrepo.lookup(update)
638 except error.RepoLookupError:
638 except error.RepoLookupError:
639 pass
639 pass
640 if uprev is None:
640 if uprev is None:
641 try:
641 try:
642 uprev = destrepo._bookmarks['@']
642 uprev = destrepo._bookmarks['@']
643 update = '@'
643 update = '@'
644 bn = destrepo[uprev].branch()
644 bn = destrepo[uprev].branch()
645 if bn == 'default':
645 if bn == 'default':
646 status = _("updating to bookmark @\n")
646 status = _("updating to bookmark @\n")
647 else:
647 else:
648 status = (_("updating to bookmark @ on branch %s\n")
648 status = (_("updating to bookmark @ on branch %s\n")
649 % bn)
649 % bn)
650 except KeyError:
650 except KeyError:
651 try:
651 try:
652 uprev = destrepo.branchtip('default')
652 uprev = destrepo.branchtip('default')
653 except error.RepoLookupError:
653 except error.RepoLookupError:
654 uprev = destrepo.lookup('tip')
654 uprev = destrepo.lookup('tip')
655 if not status:
655 if not status:
656 bn = destrepo[uprev].branch()
656 bn = destrepo[uprev].branch()
657 status = _("updating to branch %s\n") % bn
657 status = _("updating to branch %s\n") % bn
658 destrepo.ui.status(status)
658 destrepo.ui.status(status)
659 _update(destrepo, uprev)
659 _update(destrepo, uprev)
660 if update in destrepo._bookmarks:
660 if update in destrepo._bookmarks:
661 bookmarks.activate(destrepo, update)
661 bookmarks.activate(destrepo, update)
662 finally:
662 finally:
663 release(srclock, destlock)
663 release(srclock, destlock)
664 if cleandir is not None:
664 if cleandir is not None:
665 shutil.rmtree(cleandir, True)
665 shutil.rmtree(cleandir, True)
666 if srcpeer is not None:
666 if srcpeer is not None:
667 srcpeer.close()
667 srcpeer.close()
668 return srcpeer, destpeer
668 return srcpeer, destpeer
669
669
670 def _showstats(repo, stats, quietempty=False):
670 def _showstats(repo, stats, quietempty=False):
671 if quietempty and not any(stats):
671 if quietempty and not any(stats):
672 return
672 return
673 repo.ui.status(_("%d files updated, %d files merged, "
673 repo.ui.status(_("%d files updated, %d files merged, "
674 "%d files removed, %d files unresolved\n") % stats)
674 "%d files removed, %d files unresolved\n") % stats)
675
675
676 def updaterepo(repo, node, overwrite):
676 def updaterepo(repo, node, overwrite):
677 """Update the working directory to node.
677 """Update the working directory to node.
678
678
679 When overwrite is set, changes are clobbered, merged else
679 When overwrite is set, changes are clobbered, merged else
680
680
681 returns stats (see pydoc mercurial.merge.applyupdates)"""
681 returns stats (see pydoc mercurial.merge.applyupdates)"""
682 return mergemod.update(repo, node, False, overwrite,
682 return mergemod.update(repo, node, False, overwrite,
683 labels=['working copy', 'destination'])
683 labels=['working copy', 'destination'])
684
684
685 def update(repo, node, quietempty=False):
685 def update(repo, node, quietempty=False):
686 """update the working directory to node, merging linear changes"""
686 """update the working directory to node, merging linear changes"""
687 stats = updaterepo(repo, node, False)
687 stats = updaterepo(repo, node, False)
688 _showstats(repo, stats, quietempty)
688 _showstats(repo, stats, quietempty)
689 if stats[3]:
689 if stats[3]:
690 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
690 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
691 return stats[3] > 0
691 return stats[3] > 0
692
692
693 # naming conflict in clone()
693 # naming conflict in clone()
694 _update = update
694 _update = update
695
695
696 def clean(repo, node, show_stats=True, quietempty=False):
696 def clean(repo, node, show_stats=True, quietempty=False):
697 """forcibly switch the working directory to node, clobbering changes"""
697 """forcibly switch the working directory to node, clobbering changes"""
698 stats = updaterepo(repo, node, True)
698 stats = updaterepo(repo, node, True)
699 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
699 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
700 if show_stats:
700 if show_stats:
701 _showstats(repo, stats, quietempty)
701 _showstats(repo, stats, quietempty)
702 return stats[3] > 0
702 return stats[3] > 0
703
703
704 # naming conflict in updatetotally()
704 # naming conflict in updatetotally()
705 _clean = clean
705 _clean = clean
706
706
707 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
707 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
708 """Update the working directory with extra care for non-file components
708 """Update the working directory with extra care for non-file components
709
709
710 This takes care of non-file components below:
710 This takes care of non-file components below:
711
711
712 :bookmark: might be advanced or (in)activated
712 :bookmark: might be advanced or (in)activated
713
713
714 This takes arguments below:
714 This takes arguments below:
715
715
716 :checkout: to which revision the working directory is updated
716 :checkout: to which revision the working directory is updated
717 :brev: a name, which might be a bookmark to be activated after updating
717 :brev: a name, which might be a bookmark to be activated after updating
718 :clean: whether changes in the working directory can be discarded
718 :clean: whether changes in the working directory can be discarded
719 :check: whether changes in the working directory should be checked
719 :check: whether changes in the working directory should be checked
720
720
721 This returns whether conflict is detected at updating or not.
721 This returns whether conflict is detected at updating or not.
722 """
722 """
723 with repo.wlock():
723 with repo.wlock():
724 movemarkfrom = None
724 movemarkfrom = None
725 warndest = False
725 warndest = False
726 if checkout is None:
726 if checkout is None:
727 updata = destutil.destupdate(repo, clean=clean, check=check)
727 updata = destutil.destupdate(repo, clean=clean, check=check)
728 checkout, movemarkfrom, brev = updata
728 checkout, movemarkfrom, brev = updata
729 warndest = True
729 warndest = True
730
730
731 if clean:
731 if clean:
732 ret = _clean(repo, checkout)
732 ret = _clean(repo, checkout)
733 else:
733 else:
734 ret = _update(repo, checkout)
734 ret = _update(repo, checkout)
735
735
736 if not ret and movemarkfrom:
736 if not ret and movemarkfrom:
737 if movemarkfrom == repo['.'].node():
737 if movemarkfrom == repo['.'].node():
738 pass # no-op update
738 pass # no-op update
739 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
739 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
740 ui.status(_("updating bookmark %s\n") % repo._activebookmark)
740 ui.status(_("updating bookmark %s\n") % repo._activebookmark)
741 else:
741 else:
742 # this can happen with a non-linear update
742 # this can happen with a non-linear update
743 ui.status(_("(leaving bookmark %s)\n") %
743 ui.status(_("(leaving bookmark %s)\n") %
744 repo._activebookmark)
744 repo._activebookmark)
745 bookmarks.deactivate(repo)
745 bookmarks.deactivate(repo)
746 elif brev in repo._bookmarks:
746 elif brev in repo._bookmarks:
747 if brev != repo._activebookmark:
747 if brev != repo._activebookmark:
748 ui.status(_("(activating bookmark %s)\n") % brev)
748 ui.status(_("(activating bookmark %s)\n") % brev)
749 bookmarks.activate(repo, brev)
749 bookmarks.activate(repo, brev)
750 elif brev:
750 elif brev:
751 if repo._activebookmark:
751 if repo._activebookmark:
752 ui.status(_("(leaving bookmark %s)\n") %
752 ui.status(_("(leaving bookmark %s)\n") %
753 repo._activebookmark)
753 repo._activebookmark)
754 bookmarks.deactivate(repo)
754 bookmarks.deactivate(repo)
755
755
756 if warndest:
756 if warndest:
757 destutil.statusotherdests(ui, repo)
757 destutil.statusotherdests(ui, repo)
758
758
759 return ret
759 return ret
760
760
761 def merge(repo, node, force=None, remind=True, mergeforce=False):
761 def merge(repo, node, force=None, remind=True, mergeforce=False):
762 """Branch merge with node, resolving changes. Return true if any
762 """Branch merge with node, resolving changes. Return true if any
763 unresolved conflicts."""
763 unresolved conflicts."""
764 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
764 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
765 _showstats(repo, stats)
765 _showstats(repo, stats)
766 if stats[3]:
766 if stats[3]:
767 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
767 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
768 "or 'hg update -C .' to abandon\n"))
768 "or 'hg update -C .' to abandon\n"))
769 elif remind:
769 elif remind:
770 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
770 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
771 return stats[3] > 0
771 return stats[3] > 0
772
772
773 def _incoming(displaychlist, subreporecurse, ui, repo, source,
773 def _incoming(displaychlist, subreporecurse, ui, repo, source,
774 opts, buffered=False):
774 opts, buffered=False):
775 """
775 """
776 Helper for incoming / gincoming.
776 Helper for incoming / gincoming.
777 displaychlist gets called with
777 displaychlist gets called with
778 (remoterepo, incomingchangesetlist, displayer) parameters,
778 (remoterepo, incomingchangesetlist, displayer) parameters,
779 and is supposed to contain only code that can't be unified.
779 and is supposed to contain only code that can't be unified.
780 """
780 """
781 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
781 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
782 other = peer(repo, opts, source)
782 other = peer(repo, opts, source)
783 ui.status(_('comparing with %s\n') % util.hidepassword(source))
783 ui.status(_('comparing with %s\n') % util.hidepassword(source))
784 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
784 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
785
785
786 if revs:
786 if revs:
787 revs = [other.lookup(rev) for rev in revs]
787 revs = [other.lookup(rev) for rev in revs]
788 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
788 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
789 revs, opts["bundle"], opts["force"])
789 revs, opts["bundle"], opts["force"])
790 try:
790 try:
791 if not chlist:
791 if not chlist:
792 ui.status(_("no changes found\n"))
792 ui.status(_("no changes found\n"))
793 return subreporecurse()
793 return subreporecurse()
794
794
795 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
795 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
796 displaychlist(other, chlist, displayer)
796 displaychlist(other, chlist, displayer)
797 displayer.close()
797 displayer.close()
798 finally:
798 finally:
799 cleanupfn()
799 cleanupfn()
800 subreporecurse()
800 subreporecurse()
801 return 0 # exit code is zero since we found incoming changes
801 return 0 # exit code is zero since we found incoming changes
802
802
803 def incoming(ui, repo, source, opts):
803 def incoming(ui, repo, source, opts):
804 def subreporecurse():
804 def subreporecurse():
805 ret = 1
805 ret = 1
806 if opts.get('subrepos'):
806 if opts.get('subrepos'):
807 ctx = repo[None]
807 ctx = repo[None]
808 for subpath in sorted(ctx.substate):
808 for subpath in sorted(ctx.substate):
809 sub = ctx.sub(subpath)
809 sub = ctx.sub(subpath)
810 ret = min(ret, sub.incoming(ui, source, opts))
810 ret = min(ret, sub.incoming(ui, source, opts))
811 return ret
811 return ret
812
812
813 def display(other, chlist, displayer):
813 def display(other, chlist, displayer):
814 limit = cmdutil.loglimit(opts)
814 limit = cmdutil.loglimit(opts)
815 if opts.get('newest_first'):
815 if opts.get('newest_first'):
816 chlist.reverse()
816 chlist.reverse()
817 count = 0
817 count = 0
818 for n in chlist:
818 for n in chlist:
819 if limit is not None and count >= limit:
819 if limit is not None and count >= limit:
820 break
820 break
821 parents = [p for p in other.changelog.parents(n) if p != nullid]
821 parents = [p for p in other.changelog.parents(n) if p != nullid]
822 if opts.get('no_merges') and len(parents) == 2:
822 if opts.get('no_merges') and len(parents) == 2:
823 continue
823 continue
824 count += 1
824 count += 1
825 displayer.show(other[n])
825 displayer.show(other[n])
826 return _incoming(display, subreporecurse, ui, repo, source, opts)
826 return _incoming(display, subreporecurse, ui, repo, source, opts)
827
827
828 def _outgoing(ui, repo, dest, opts):
828 def _outgoing(ui, repo, dest, opts):
829 dest = ui.expandpath(dest or 'default-push', dest or 'default')
829 dest = ui.expandpath(dest or 'default-push', dest or 'default')
830 dest, branches = parseurl(dest, opts.get('branch'))
830 dest, branches = parseurl(dest, opts.get('branch'))
831 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
831 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
832 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
832 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
833 if revs:
833 if revs:
834 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
834 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
835
835
836 other = peer(repo, opts, dest)
836 other = peer(repo, opts, dest)
837 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
837 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
838 force=opts.get('force'))
838 force=opts.get('force'))
839 o = outgoing.missing
839 o = outgoing.missing
840 if not o:
840 if not o:
841 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
841 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
842 return o, other
842 return o, other
843
843
844 def outgoing(ui, repo, dest, opts):
844 def outgoing(ui, repo, dest, opts):
845 def recurse():
845 def recurse():
846 ret = 1
846 ret = 1
847 if opts.get('subrepos'):
847 if opts.get('subrepos'):
848 ctx = repo[None]
848 ctx = repo[None]
849 for subpath in sorted(ctx.substate):
849 for subpath in sorted(ctx.substate):
850 sub = ctx.sub(subpath)
850 sub = ctx.sub(subpath)
851 ret = min(ret, sub.outgoing(ui, dest, opts))
851 ret = min(ret, sub.outgoing(ui, dest, opts))
852 return ret
852 return ret
853
853
854 limit = cmdutil.loglimit(opts)
854 limit = cmdutil.loglimit(opts)
855 o, other = _outgoing(ui, repo, dest, opts)
855 o, other = _outgoing(ui, repo, dest, opts)
856 if not o:
856 if not o:
857 cmdutil.outgoinghooks(ui, repo, other, opts, o)
857 cmdutil.outgoinghooks(ui, repo, other, opts, o)
858 return recurse()
858 return recurse()
859
859
860 if opts.get('newest_first'):
860 if opts.get('newest_first'):
861 o.reverse()
861 o.reverse()
862 displayer = cmdutil.show_changeset(ui, repo, opts)
862 displayer = cmdutil.show_changeset(ui, repo, opts)
863 count = 0
863 count = 0
864 for n in o:
864 for n in o:
865 if limit is not None and count >= limit:
865 if limit is not None and count >= limit:
866 break
866 break
867 parents = [p for p in repo.changelog.parents(n) if p != nullid]
867 parents = [p for p in repo.changelog.parents(n) if p != nullid]
868 if opts.get('no_merges') and len(parents) == 2:
868 if opts.get('no_merges') and len(parents) == 2:
869 continue
869 continue
870 count += 1
870 count += 1
871 displayer.show(repo[n])
871 displayer.show(repo[n])
872 displayer.close()
872 displayer.close()
873 cmdutil.outgoinghooks(ui, repo, other, opts, o)
873 cmdutil.outgoinghooks(ui, repo, other, opts, o)
874 recurse()
874 recurse()
875 return 0 # exit code is zero since we found outgoing changes
875 return 0 # exit code is zero since we found outgoing changes
876
876
877 def verify(repo):
877 def verify(repo):
878 """verify the consistency of a repository"""
878 """verify the consistency of a repository"""
879 ret = verifymod.verify(repo)
879 ret = verifymod.verify(repo)
880
880
881 # Broken subrepo references in hidden csets don't seem worth worrying about,
881 # Broken subrepo references in hidden csets don't seem worth worrying about,
882 # since they can't be pushed/pulled, and --hidden can be used if they are a
882 # since they can't be pushed/pulled, and --hidden can be used if they are a
883 # concern.
883 # concern.
884
884
885 # pathto() is needed for -R case
885 # pathto() is needed for -R case
886 revs = repo.revs("filelog(%s)",
886 revs = repo.revs("filelog(%s)",
887 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
887 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
888
888
889 if revs:
889 if revs:
890 repo.ui.status(_('checking subrepo links\n'))
890 repo.ui.status(_('checking subrepo links\n'))
891 for rev in revs:
891 for rev in revs:
892 ctx = repo[rev]
892 ctx = repo[rev]
893 try:
893 try:
894 for subpath in ctx.substate:
894 for subpath in ctx.substate:
895 try:
895 try:
896 ret = (ctx.sub(subpath, allowcreate=False).verify()
896 ret = (ctx.sub(subpath, allowcreate=False).verify()
897 or ret)
897 or ret)
898 except error.RepoError as e:
898 except error.RepoError as e:
899 repo.ui.warn(_('%s: %s\n') % (rev, e))
899 repo.ui.warn(('%s: %s\n') % (rev, e))
900 except Exception:
900 except Exception:
901 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
901 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
902 node.short(ctx.node()))
902 node.short(ctx.node()))
903
903
904 return ret
904 return ret
905
905
906 def remoteui(src, opts):
906 def remoteui(src, opts):
907 'build a remote ui from ui or repo and opts'
907 'build a remote ui from ui or repo and opts'
908 if util.safehasattr(src, 'baseui'): # looks like a repository
908 if util.safehasattr(src, 'baseui'): # looks like a repository
909 dst = src.baseui.copy() # drop repo-specific config
909 dst = src.baseui.copy() # drop repo-specific config
910 src = src.ui # copy target options from repo
910 src = src.ui # copy target options from repo
911 else: # assume it's a global ui object
911 else: # assume it's a global ui object
912 dst = src.copy() # keep all global options
912 dst = src.copy() # keep all global options
913
913
914 # copy ssh-specific options
914 # copy ssh-specific options
915 for o in 'ssh', 'remotecmd':
915 for o in 'ssh', 'remotecmd':
916 v = opts.get(o) or src.config('ui', o)
916 v = opts.get(o) or src.config('ui', o)
917 if v:
917 if v:
918 dst.setconfig("ui", o, v, 'copied')
918 dst.setconfig("ui", o, v, 'copied')
919
919
920 # copy bundle-specific options
920 # copy bundle-specific options
921 r = src.config('bundle', 'mainreporoot')
921 r = src.config('bundle', 'mainreporoot')
922 if r:
922 if r:
923 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
923 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
924
924
925 # copy selected local settings to the remote ui
925 # copy selected local settings to the remote ui
926 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
926 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
927 for key, val in src.configitems(sect):
927 for key, val in src.configitems(sect):
928 dst.setconfig(sect, key, val, 'copied')
928 dst.setconfig(sect, key, val, 'copied')
929 v = src.config('web', 'cacerts')
929 v = src.config('web', 'cacerts')
930 if v:
930 if v:
931 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
931 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
932
932
933 return dst
933 return dst
934
934
935 # Files of interest
935 # Files of interest
936 # Used to check if the repository has changed looking at mtime and size of
936 # Used to check if the repository has changed looking at mtime and size of
937 # these files.
937 # these files.
938 foi = [('spath', '00changelog.i'),
938 foi = [('spath', '00changelog.i'),
939 ('spath', 'phaseroots'), # ! phase can change content at the same size
939 ('spath', 'phaseroots'), # ! phase can change content at the same size
940 ('spath', 'obsstore'),
940 ('spath', 'obsstore'),
941 ('path', 'bookmarks'), # ! bookmark can change content at the same size
941 ('path', 'bookmarks'), # ! bookmark can change content at the same size
942 ]
942 ]
943
943
944 class cachedlocalrepo(object):
944 class cachedlocalrepo(object):
945 """Holds a localrepository that can be cached and reused."""
945 """Holds a localrepository that can be cached and reused."""
946
946
947 def __init__(self, repo):
947 def __init__(self, repo):
948 """Create a new cached repo from an existing repo.
948 """Create a new cached repo from an existing repo.
949
949
950 We assume the passed in repo was recently created. If the
950 We assume the passed in repo was recently created. If the
951 repo has changed between when it was created and when it was
951 repo has changed between when it was created and when it was
952 turned into a cache, it may not refresh properly.
952 turned into a cache, it may not refresh properly.
953 """
953 """
954 assert isinstance(repo, localrepo.localrepository)
954 assert isinstance(repo, localrepo.localrepository)
955 self._repo = repo
955 self._repo = repo
956 self._state, self.mtime = self._repostate()
956 self._state, self.mtime = self._repostate()
957 self._filtername = repo.filtername
957 self._filtername = repo.filtername
958
958
959 def fetch(self):
959 def fetch(self):
960 """Refresh (if necessary) and return a repository.
960 """Refresh (if necessary) and return a repository.
961
961
962 If the cached instance is out of date, it will be recreated
962 If the cached instance is out of date, it will be recreated
963 automatically and returned.
963 automatically and returned.
964
964
965 Returns a tuple of the repo and a boolean indicating whether a new
965 Returns a tuple of the repo and a boolean indicating whether a new
966 repo instance was created.
966 repo instance was created.
967 """
967 """
968 # We compare the mtimes and sizes of some well-known files to
968 # We compare the mtimes and sizes of some well-known files to
969 # determine if the repo changed. This is not precise, as mtimes
969 # determine if the repo changed. This is not precise, as mtimes
970 # are susceptible to clock skew and imprecise filesystems and
970 # are susceptible to clock skew and imprecise filesystems and
971 # file content can change while maintaining the same size.
971 # file content can change while maintaining the same size.
972
972
973 state, mtime = self._repostate()
973 state, mtime = self._repostate()
974 if state == self._state:
974 if state == self._state:
975 return self._repo, False
975 return self._repo, False
976
976
977 repo = repository(self._repo.baseui, self._repo.url())
977 repo = repository(self._repo.baseui, self._repo.url())
978 if self._filtername:
978 if self._filtername:
979 self._repo = repo.filtered(self._filtername)
979 self._repo = repo.filtered(self._filtername)
980 else:
980 else:
981 self._repo = repo.unfiltered()
981 self._repo = repo.unfiltered()
982 self._state = state
982 self._state = state
983 self.mtime = mtime
983 self.mtime = mtime
984
984
985 return self._repo, True
985 return self._repo, True
986
986
987 def _repostate(self):
987 def _repostate(self):
988 state = []
988 state = []
989 maxmtime = -1
989 maxmtime = -1
990 for attr, fname in foi:
990 for attr, fname in foi:
991 prefix = getattr(self._repo, attr)
991 prefix = getattr(self._repo, attr)
992 p = os.path.join(prefix, fname)
992 p = os.path.join(prefix, fname)
993 try:
993 try:
994 st = os.stat(p)
994 st = os.stat(p)
995 except OSError:
995 except OSError:
996 st = os.stat(prefix)
996 st = os.stat(prefix)
997 state.append((st.st_mtime, st.st_size))
997 state.append((st.st_mtime, st.st_size))
998 maxmtime = max(maxmtime, st.st_mtime)
998 maxmtime = max(maxmtime, st.st_mtime)
999
999
1000 return tuple(state), maxmtime
1000 return tuple(state), maxmtime
1001
1001
1002 def copy(self):
1002 def copy(self):
1003 """Obtain a copy of this class instance.
1003 """Obtain a copy of this class instance.
1004
1004
1005 A new localrepository instance is obtained. The new instance should be
1005 A new localrepository instance is obtained. The new instance should be
1006 completely independent of the original.
1006 completely independent of the original.
1007 """
1007 """
1008 repo = repository(self._repo.baseui, self._repo.origroot)
1008 repo = repository(self._repo.baseui, self._repo.origroot)
1009 if self._filtername:
1009 if self._filtername:
1010 repo = repo.filtered(self._filtername)
1010 repo = repo.filtered(self._filtername)
1011 else:
1011 else:
1012 repo = repo.unfiltered()
1012 repo = repo.unfiltered()
1013 c = cachedlocalrepo(repo)
1013 c = cachedlocalrepo(repo)
1014 c._state = self._state
1014 c._state = self._state
1015 c.mtime = self.mtime
1015 c.mtime = self.mtime
1016 return c
1016 return c
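
The change in this revision drops the gettext wrapper _() around a format string that consists only of placeholders ('%s: %s\n' at line 899), since such a string gives translators nothing to translate. Below is a minimal standalone sketch of that pattern; it uses Python's standard gettext module to stand in for the _() helper imported from .i18n in this file (an illustrative assumption, not code from the changeset):

import gettext

# With no catalog installed, gettext returns the message unchanged,
# which mirrors how the _() helper behaves when no translation exists.
_ = gettext.NullTranslations().gettext

rev, err = 42, "missing subrepo"

# A message containing human-readable words is worth marking for translation:
print(_("checking subrepo links\n"), end="")

# A placeholder-only format string carries no translatable text, so the
# wrapper is omitted, as in the updated line 899 above:
print("%s: %s\n" % (rev, err), end="")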