py3: check for bytes instead of str in isinstance
Pulkit Goyal
r33018:071732d9 default
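Mercurial handles repository paths and URLs as byte strings internally, so under Python 3 the value reaching islocal() is bytes, not str, and a check for str would send such paths down the wrong branch. A minimal illustrative sketch of the difference (not part of the changeset itself):

    # Python 3 separates bytes from str; on Python 2, bytes is an alias for str.
    path = b'/home/user/repo'       # Mercurial passes paths around as bytes
    print(isinstance(path, str))    # False on Python 3 (True on Python 2)
    print(isinstance(path, bytes))  # True on both Python 2 and Python 3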
@@ -1,1064 +1,1064 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45
45
46 release = lock.release
46 release = lock.release
47
47
48 # shared features
48 # shared features
49 sharedbookmarks = 'bookmarks'
49 sharedbookmarks = 'bookmarks'
50
50
51 def _local(path):
51 def _local(path):
52 path = util.expandpath(util.urllocalpath(path))
52 path = util.expandpath(util.urllocalpath(path))
53 return (os.path.isfile(path) and bundlerepo or localrepo)
53 return (os.path.isfile(path) and bundlerepo or localrepo)
54
54
55 def addbranchrevs(lrepo, other, branches, revs):
55 def addbranchrevs(lrepo, other, branches, revs):
56 peer = other.peer() # a courtesy to callers using a localrepo for other
56 peer = other.peer() # a courtesy to callers using a localrepo for other
57 hashbranch, branches = branches
57 hashbranch, branches = branches
58 if not hashbranch and not branches:
58 if not hashbranch and not branches:
59 x = revs or None
59 x = revs or None
60 if util.safehasattr(revs, 'first'):
60 if util.safehasattr(revs, 'first'):
61 y = revs.first()
61 y = revs.first()
62 elif revs:
62 elif revs:
63 y = revs[0]
63 y = revs[0]
64 else:
64 else:
65 y = None
65 y = None
66 return x, y
66 return x, y
67 if revs:
67 if revs:
68 revs = list(revs)
68 revs = list(revs)
69 else:
69 else:
70 revs = []
70 revs = []
71
71
72 if not peer.capable('branchmap'):
72 if not peer.capable('branchmap'):
73 if branches:
73 if branches:
74 raise error.Abort(_("remote branch lookup not supported"))
74 raise error.Abort(_("remote branch lookup not supported"))
75 revs.append(hashbranch)
75 revs.append(hashbranch)
76 return revs, revs[0]
76 return revs, revs[0]
77 branchmap = peer.branchmap()
77 branchmap = peer.branchmap()
78
78
79 def primary(branch):
79 def primary(branch):
80 if branch == '.':
80 if branch == '.':
81 if not lrepo:
81 if not lrepo:
82 raise error.Abort(_("dirstate branch not accessible"))
82 raise error.Abort(_("dirstate branch not accessible"))
83 branch = lrepo.dirstate.branch()
83 branch = lrepo.dirstate.branch()
84 if branch in branchmap:
84 if branch in branchmap:
85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 return True
86 return True
87 else:
87 else:
88 return False
88 return False
89
89
90 for branch in branches:
90 for branch in branches:
91 if not primary(branch):
91 if not primary(branch):
92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 if hashbranch:
93 if hashbranch:
94 if not primary(hashbranch):
94 if not primary(hashbranch):
95 revs.append(hashbranch)
95 revs.append(hashbranch)
96 return revs, revs[0]
96 return revs, revs[0]
97
97
98 def parseurl(path, branches=None):
98 def parseurl(path, branches=None):
99 '''parse url#branch, returning (url, (branch, branches))'''
99 '''parse url#branch, returning (url, (branch, branches))'''
100
100
101 u = util.url(path)
101 u = util.url(path)
102 branch = None
102 branch = None
103 if u.fragment:
103 if u.fragment:
104 branch = u.fragment
104 branch = u.fragment
105 u.fragment = None
105 u.fragment = None
106 return bytes(u), (branch, branches or [])
106 return bytes(u), (branch, branches or [])
107
107
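# Illustrative example (assumed usage, not part of the original file): parseurl()
# splits an optional '#branch' fragment off the URL, e.g.
#   parseurl('http://example.org/repo#stable')
#   -> ('http://example.org/repo', ('stable', []))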
108 schemes = {
108 schemes = {
109 'bundle': bundlerepo,
109 'bundle': bundlerepo,
110 'union': unionrepo,
110 'union': unionrepo,
111 'file': _local,
111 'file': _local,
112 'http': httppeer,
112 'http': httppeer,
113 'https': httppeer,
113 'https': httppeer,
114 'ssh': sshpeer,
114 'ssh': sshpeer,
115 'static-http': statichttprepo,
115 'static-http': statichttprepo,
116 }
116 }
117
117
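# Illustrative note (not part of the original file): _peerlookup() below selects
# the handler module from the schemes table by URL scheme, falling back to
# 'file', so e.g. 'https://example.org/repo' maps to httppeer while a plain
# filesystem path maps to _local.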
118 def _peerlookup(path):
118 def _peerlookup(path):
119 u = util.url(path)
119 u = util.url(path)
120 scheme = u.scheme or 'file'
120 scheme = u.scheme or 'file'
121 thing = schemes.get(scheme) or schemes['file']
121 thing = schemes.get(scheme) or schemes['file']
122 try:
122 try:
123 return thing(path)
123 return thing(path)
124 except TypeError:
124 except TypeError:
125 # we can't test callable(thing) because 'thing' can be an unloaded
125 # we can't test callable(thing) because 'thing' can be an unloaded
126 # module that implements __call__
126 # module that implements __call__
127 if not util.safehasattr(thing, 'instance'):
127 if not util.safehasattr(thing, 'instance'):
128 raise
128 raise
129 return thing
129 return thing
130
130
131 def islocal(repo):
131 def islocal(repo):
132 '''return true if repo (or path pointing to repo) is local'''
132 '''return true if repo (or path pointing to repo) is local'''
133 if isinstance(repo, str):
133 if isinstance(repo, bytes):
134 try:
134 try:
135 return _peerlookup(repo).islocal(repo)
135 return _peerlookup(repo).islocal(repo)
136 except AttributeError:
136 except AttributeError:
137 return False
137 return False
138 return repo.local()
138 return repo.local()
139
139
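# Illustrative note (not part of the original file): islocal() accepts either a
# repository object or a path; on Python 3 such a path arrives as a bytes value,
# e.g. islocal(b'/srv/repo'), which is why the isinstance() check above now
# tests for bytes rather than str.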
140 def openpath(ui, path):
140 def openpath(ui, path):
141 '''open path with open if local, url.open if remote'''
141 '''open path with open if local, url.open if remote'''
142 pathurl = util.url(path, parsequery=False, parsefragment=False)
142 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 if pathurl.islocal():
143 if pathurl.islocal():
144 return util.posixfile(pathurl.localpath(), 'rb')
144 return util.posixfile(pathurl.localpath(), 'rb')
145 else:
145 else:
146 return url.open(ui, path)
146 return url.open(ui, path)
147
147
148 # a list of (ui, repo) functions called for wire peer initialization
148 # a list of (ui, repo) functions called for wire peer initialization
149 wirepeersetupfuncs = []
149 wirepeersetupfuncs = []
150
150
151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 """return a repository object for the specified path"""
152 """return a repository object for the specified path"""
153 obj = _peerlookup(path).instance(ui, path, create)
153 obj = _peerlookup(path).instance(ui, path, create)
154 ui = getattr(obj, "ui", ui)
154 ui = getattr(obj, "ui", ui)
155 for f in presetupfuncs or []:
155 for f in presetupfuncs or []:
156 f(ui, obj)
156 f(ui, obj)
157 for name, module in extensions.extensions(ui):
157 for name, module in extensions.extensions(ui):
158 hook = getattr(module, 'reposetup', None)
158 hook = getattr(module, 'reposetup', None)
159 if hook:
159 if hook:
160 hook(ui, obj)
160 hook(ui, obj)
161 if not obj.local():
161 if not obj.local():
162 for f in wirepeersetupfuncs:
162 for f in wirepeersetupfuncs:
163 f(ui, obj)
163 f(ui, obj)
164 return obj
164 return obj
165
165
166 def repository(ui, path='', create=False, presetupfuncs=None):
166 def repository(ui, path='', create=False, presetupfuncs=None):
167 """return a repository object for the specified path"""
167 """return a repository object for the specified path"""
168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 repo = peer.local()
169 repo = peer.local()
170 if not repo:
170 if not repo:
171 raise error.Abort(_("repository '%s' is not local") %
171 raise error.Abort(_("repository '%s' is not local") %
172 (path or peer.url()))
172 (path or peer.url()))
173 return repo.filtered('visible')
173 return repo.filtered('visible')
174
174
175 def peer(uiorrepo, opts, path, create=False):
175 def peer(uiorrepo, opts, path, create=False):
176 '''return a repository peer for the specified path'''
176 '''return a repository peer for the specified path'''
177 rui = remoteui(uiorrepo, opts)
177 rui = remoteui(uiorrepo, opts)
178 return _peerorrepo(rui, path, create).peer()
178 return _peerorrepo(rui, path, create).peer()
179
179
180 def defaultdest(source):
180 def defaultdest(source):
181 '''return default destination of clone if none is given
181 '''return default destination of clone if none is given
182
182
183 >>> defaultdest('foo')
183 >>> defaultdest('foo')
184 'foo'
184 'foo'
185 >>> defaultdest('/foo/bar')
185 >>> defaultdest('/foo/bar')
186 'bar'
186 'bar'
187 >>> defaultdest('/')
187 >>> defaultdest('/')
188 ''
188 ''
189 >>> defaultdest('')
189 >>> defaultdest('')
190 ''
190 ''
191 >>> defaultdest('http://example.org/')
191 >>> defaultdest('http://example.org/')
192 ''
192 ''
193 >>> defaultdest('http://example.org/foo/')
193 >>> defaultdest('http://example.org/foo/')
194 'foo'
194 'foo'
195 '''
195 '''
196 path = util.url(source).path
196 path = util.url(source).path
197 if not path:
197 if not path:
198 return ''
198 return ''
199 return os.path.basename(os.path.normpath(path))
199 return os.path.basename(os.path.normpath(path))
200
200
201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 relative=False):
202 relative=False):
203 '''create a shared repository'''
203 '''create a shared repository'''
204
204
205 if not islocal(source):
205 if not islocal(source):
206 raise error.Abort(_('can only share local repositories'))
206 raise error.Abort(_('can only share local repositories'))
207
207
208 if not dest:
208 if not dest:
209 dest = defaultdest(source)
209 dest = defaultdest(source)
210 else:
210 else:
211 dest = ui.expandpath(dest)
211 dest = ui.expandpath(dest)
212
212
213 if isinstance(source, str):
213 if isinstance(source, str):
214 origsource = ui.expandpath(source)
214 origsource = ui.expandpath(source)
215 source, branches = parseurl(origsource)
215 source, branches = parseurl(origsource)
216 srcrepo = repository(ui, source)
216 srcrepo = repository(ui, source)
217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 else:
218 else:
219 srcrepo = source.local()
219 srcrepo = source.local()
220 origsource = source = srcrepo.url()
220 origsource = source = srcrepo.url()
221 checkout = None
221 checkout = None
222
222
223 sharedpath = srcrepo.sharedpath # if our source is already sharing
223 sharedpath = srcrepo.sharedpath # if our source is already sharing
224
224
225 destwvfs = vfsmod.vfs(dest, realpath=True)
225 destwvfs = vfsmod.vfs(dest, realpath=True)
226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227
227
228 if destvfs.lexists():
228 if destvfs.lexists():
229 raise error.Abort(_('destination already exists'))
229 raise error.Abort(_('destination already exists'))
230
230
231 if not destwvfs.isdir():
231 if not destwvfs.isdir():
232 destwvfs.mkdir()
232 destwvfs.mkdir()
233 destvfs.makedir()
233 destvfs.makedir()
234
234
235 requirements = ''
235 requirements = ''
236 try:
236 try:
237 requirements = srcrepo.vfs.read('requires')
237 requirements = srcrepo.vfs.read('requires')
238 except IOError as inst:
238 except IOError as inst:
239 if inst.errno != errno.ENOENT:
239 if inst.errno != errno.ENOENT:
240 raise
240 raise
241
241
242 if relative:
242 if relative:
243 try:
243 try:
244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 requirements += 'relshared\n'
245 requirements += 'relshared\n'
246 except IOError as e:
246 except IOError as e:
247 raise error.Abort(_('cannot calculate relative path'),
247 raise error.Abort(_('cannot calculate relative path'),
248 hint=str(e))
248 hint=str(e))
249 else:
249 else:
250 requirements += 'shared\n'
250 requirements += 'shared\n'
251
251
252 destvfs.write('requires', requirements)
252 destvfs.write('requires', requirements)
253 destvfs.write('sharedpath', sharedpath)
253 destvfs.write('sharedpath', sharedpath)
254
254
255 r = repository(ui, destwvfs.base)
255 r = repository(ui, destwvfs.base)
256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
257 _postshareupdate(r, update, checkout=checkout)
257 _postshareupdate(r, update, checkout=checkout)
258
258
259 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
259 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
260 """Called after a new shared repo is created.
260 """Called after a new shared repo is created.
261
261
262 The new repo only has a requirements file and pointer to the source.
262 The new repo only has a requirements file and pointer to the source.
263 This function configures additional shared data.
263 This function configures additional shared data.
264
264
265 Extensions can wrap this function and write additional entries to
265 Extensions can wrap this function and write additional entries to
266 destrepo/.hg/shared to indicate additional pieces of data to be shared.
266 destrepo/.hg/shared to indicate additional pieces of data to be shared.
267 """
267 """
268 default = defaultpath or sourcerepo.ui.config('paths', 'default')
268 default = defaultpath or sourcerepo.ui.config('paths', 'default')
269 if default:
269 if default:
270 fp = destrepo.vfs("hgrc", "w", text=True)
270 fp = destrepo.vfs("hgrc", "w", text=True)
271 fp.write("[paths]\n")
271 fp.write("[paths]\n")
272 fp.write("default = %s\n" % default)
272 fp.write("default = %s\n" % default)
273 fp.close()
273 fp.close()
274
274
275 with destrepo.wlock():
275 with destrepo.wlock():
276 if bookmarks:
276 if bookmarks:
277 fp = destrepo.vfs('shared', 'w')
277 fp = destrepo.vfs('shared', 'w')
278 fp.write(sharedbookmarks + '\n')
278 fp.write(sharedbookmarks + '\n')
279 fp.close()
279 fp.close()
280
280
281 def _postshareupdate(repo, update, checkout=None):
281 def _postshareupdate(repo, update, checkout=None):
282 """Maybe perform a working directory update after a shared repo is created.
282 """Maybe perform a working directory update after a shared repo is created.
283
283
284 ``update`` can be a boolean or a revision to update to.
284 ``update`` can be a boolean or a revision to update to.
285 """
285 """
286 if not update:
286 if not update:
287 return
287 return
288
288
289 repo.ui.status(_("updating working directory\n"))
289 repo.ui.status(_("updating working directory\n"))
290 if update is not True:
290 if update is not True:
291 checkout = update
291 checkout = update
292 for test in (checkout, 'default', 'tip'):
292 for test in (checkout, 'default', 'tip'):
293 if test is None:
293 if test is None:
294 continue
294 continue
295 try:
295 try:
296 uprev = repo.lookup(test)
296 uprev = repo.lookup(test)
297 break
297 break
298 except error.RepoLookupError:
298 except error.RepoLookupError:
299 continue
299 continue
300 _update(repo, uprev)
300 _update(repo, uprev)
301
301
302 def copystore(ui, srcrepo, destpath):
302 def copystore(ui, srcrepo, destpath):
303 '''copy files from store of srcrepo in destpath
303 '''copy files from store of srcrepo in destpath
304
304
305 returns destlock
305 returns destlock
306 '''
306 '''
307 destlock = None
307 destlock = None
308 try:
308 try:
309 hardlink = None
309 hardlink = None
310 num = 0
310 num = 0
311 closetopic = [None]
311 closetopic = [None]
312 def prog(topic, pos):
312 def prog(topic, pos):
313 if pos is None:
313 if pos is None:
314 closetopic[0] = topic
314 closetopic[0] = topic
315 else:
315 else:
316 ui.progress(topic, pos + num)
316 ui.progress(topic, pos + num)
317 srcpublishing = srcrepo.publishing()
317 srcpublishing = srcrepo.publishing()
318 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
318 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
319 dstvfs = vfsmod.vfs(destpath)
319 dstvfs = vfsmod.vfs(destpath)
320 for f in srcrepo.store.copylist():
320 for f in srcrepo.store.copylist():
321 if srcpublishing and f.endswith('phaseroots'):
321 if srcpublishing and f.endswith('phaseroots'):
322 continue
322 continue
323 dstbase = os.path.dirname(f)
323 dstbase = os.path.dirname(f)
324 if dstbase and not dstvfs.exists(dstbase):
324 if dstbase and not dstvfs.exists(dstbase):
325 dstvfs.mkdir(dstbase)
325 dstvfs.mkdir(dstbase)
326 if srcvfs.exists(f):
326 if srcvfs.exists(f):
327 if f.endswith('data'):
327 if f.endswith('data'):
328 # 'dstbase' may be empty (e.g. revlog format 0)
328 # 'dstbase' may be empty (e.g. revlog format 0)
329 lockfile = os.path.join(dstbase, "lock")
329 lockfile = os.path.join(dstbase, "lock")
330 # lock to avoid premature writing to the target
330 # lock to avoid premature writing to the target
331 destlock = lock.lock(dstvfs, lockfile)
331 destlock = lock.lock(dstvfs, lockfile)
332 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
332 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
333 hardlink, progress=prog)
333 hardlink, progress=prog)
334 num += n
334 num += n
335 if hardlink:
335 if hardlink:
336 ui.debug("linked %d files\n" % num)
336 ui.debug("linked %d files\n" % num)
337 if closetopic[0]:
337 if closetopic[0]:
338 ui.progress(closetopic[0], None)
338 ui.progress(closetopic[0], None)
339 else:
339 else:
340 ui.debug("copied %d files\n" % num)
340 ui.debug("copied %d files\n" % num)
341 if closetopic[0]:
341 if closetopic[0]:
342 ui.progress(closetopic[0], None)
342 ui.progress(closetopic[0], None)
343 return destlock
343 return destlock
344 except: # re-raises
344 except: # re-raises
345 release(destlock)
345 release(destlock)
346 raise
346 raise
347
347
348 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
348 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
349 rev=None, update=True, stream=False):
349 rev=None, update=True, stream=False):
350 """Perform a clone using a shared repo.
350 """Perform a clone using a shared repo.
351
351
352 The store for the repository will be located at <sharepath>/.hg. The
352 The store for the repository will be located at <sharepath>/.hg. The
353 specified revisions will be cloned or pulled from "source". A shared repo
353 specified revisions will be cloned or pulled from "source". A shared repo
354 will be created at "dest" and a working copy will be created if "update" is
354 will be created at "dest" and a working copy will be created if "update" is
355 True.
355 True.
356 """
356 """
357 revs = None
357 revs = None
358 if rev:
358 if rev:
359 if not srcpeer.capable('lookup'):
359 if not srcpeer.capable('lookup'):
360 raise error.Abort(_("src repository does not support "
360 raise error.Abort(_("src repository does not support "
361 "revision lookup and so doesn't "
361 "revision lookup and so doesn't "
362 "support clone by revision"))
362 "support clone by revision"))
363 revs = [srcpeer.lookup(r) for r in rev]
363 revs = [srcpeer.lookup(r) for r in rev]
364
364
365 # Obtain a lock before checking for or cloning the pooled repo otherwise
365 # Obtain a lock before checking for or cloning the pooled repo otherwise
366 # 2 clients may race creating or populating it.
366 # 2 clients may race creating or populating it.
367 pooldir = os.path.dirname(sharepath)
367 pooldir = os.path.dirname(sharepath)
368 # lock class requires the directory to exist.
368 # lock class requires the directory to exist.
369 try:
369 try:
370 util.makedir(pooldir, False)
370 util.makedir(pooldir, False)
371 except OSError as e:
371 except OSError as e:
372 if e.errno != errno.EEXIST:
372 if e.errno != errno.EEXIST:
373 raise
373 raise
374
374
375 poolvfs = vfsmod.vfs(pooldir)
375 poolvfs = vfsmod.vfs(pooldir)
376 basename = os.path.basename(sharepath)
376 basename = os.path.basename(sharepath)
377
377
378 with lock.lock(poolvfs, '%s.lock' % basename):
378 with lock.lock(poolvfs, '%s.lock' % basename):
379 if os.path.exists(sharepath):
379 if os.path.exists(sharepath):
380 ui.status(_('(sharing from existing pooled repository %s)\n') %
380 ui.status(_('(sharing from existing pooled repository %s)\n') %
381 basename)
381 basename)
382 else:
382 else:
383 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
383 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
384 # Always use pull mode because hardlinks in share mode don't work
384 # Always use pull mode because hardlinks in share mode don't work
385 # well. Never update because working copies aren't necessary in
385 # well. Never update because working copies aren't necessary in
386 # share mode.
386 # share mode.
387 clone(ui, peeropts, source, dest=sharepath, pull=True,
387 clone(ui, peeropts, source, dest=sharepath, pull=True,
388 rev=rev, update=False, stream=stream)
388 rev=rev, update=False, stream=stream)
389
389
390 # Resolve the value to put in [paths] section for the source.
390 # Resolve the value to put in [paths] section for the source.
391 if islocal(source):
391 if islocal(source):
392 defaultpath = os.path.abspath(util.urllocalpath(source))
392 defaultpath = os.path.abspath(util.urllocalpath(source))
393 else:
393 else:
394 defaultpath = source
394 defaultpath = source
395
395
396 sharerepo = repository(ui, path=sharepath)
396 sharerepo = repository(ui, path=sharepath)
397 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
397 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
398 defaultpath=defaultpath)
398 defaultpath=defaultpath)
399
399
400 # We need to perform a pull against the dest repo to fetch bookmarks
400 # We need to perform a pull against the dest repo to fetch bookmarks
401 # and other non-store data that isn't shared by default. In the case of
401 # and other non-store data that isn't shared by default. In the case of
402 # non-existing shared repo, this means we pull from the remote twice. This
402 # non-existing shared repo, this means we pull from the remote twice. This
403 # is a bit weird. But at the time it was implemented, there wasn't an easy
403 # is a bit weird. But at the time it was implemented, there wasn't an easy
404 # way to pull just non-changegroup data.
404 # way to pull just non-changegroup data.
405 destrepo = repository(ui, path=dest)
405 destrepo = repository(ui, path=dest)
406 exchange.pull(destrepo, srcpeer, heads=revs)
406 exchange.pull(destrepo, srcpeer, heads=revs)
407
407
408 _postshareupdate(destrepo, update)
408 _postshareupdate(destrepo, update)
409
409
410 return srcpeer, peer(ui, peeropts, dest)
410 return srcpeer, peer(ui, peeropts, dest)
411
411
412 # Recomputing branch cache might be slow on big repos,
412 # Recomputing branch cache might be slow on big repos,
413 # so just copy it
413 # so just copy it
414 def _copycache(srcrepo, dstcachedir, fname):
414 def _copycache(srcrepo, dstcachedir, fname):
415 """copy a cache from srcrepo to destcachedir (if it exists)"""
415 """copy a cache from srcrepo to destcachedir (if it exists)"""
416 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
416 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
417 dstbranchcache = os.path.join(dstcachedir, fname)
417 dstbranchcache = os.path.join(dstcachedir, fname)
418 if os.path.exists(srcbranchcache):
418 if os.path.exists(srcbranchcache):
419 if not os.path.exists(dstcachedir):
419 if not os.path.exists(dstcachedir):
420 os.mkdir(dstcachedir)
420 os.mkdir(dstcachedir)
421 util.copyfile(srcbranchcache, dstbranchcache)
421 util.copyfile(srcbranchcache, dstbranchcache)
422
422
423 def _cachetocopy(srcrepo):
423 def _cachetocopy(srcrepo):
424 """return the list of cache file valuable to copy during a clone"""
424 """return the list of cache file valuable to copy during a clone"""
425 # In local clones we're copying all nodes, not just served
425 # In local clones we're copying all nodes, not just served
426 # ones. Therefore copy all branch caches over.
426 # ones. Therefore copy all branch caches over.
427 cachefiles = ['branch2']
427 cachefiles = ['branch2']
428 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
428 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
429 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
429 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
430 cachefiles += ['tags2']
430 cachefiles += ['tags2']
431 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
431 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
432 cachefiles += ['hgtagsfnodes1']
432 cachefiles += ['hgtagsfnodes1']
433 return cachefiles
433 return cachefiles
434
434
435 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
435 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
436 update=True, stream=False, branch=None, shareopts=None):
436 update=True, stream=False, branch=None, shareopts=None):
437 """Make a copy of an existing repository.
437 """Make a copy of an existing repository.
438
438
439 Create a copy of an existing repository in a new directory. The
439 Create a copy of an existing repository in a new directory. The
440 source and destination are URLs, as passed to the repository
440 source and destination are URLs, as passed to the repository
441 function. Returns a pair of repository peers, the source and
441 function. Returns a pair of repository peers, the source and
442 newly created destination.
442 newly created destination.
443
443
444 The location of the source is added to the new repository's
444 The location of the source is added to the new repository's
445 .hg/hgrc file, as the default to be used for future pulls and
445 .hg/hgrc file, as the default to be used for future pulls and
446 pushes.
446 pushes.
447
447
448 If an exception is raised, the partly cloned/updated destination
448 If an exception is raised, the partly cloned/updated destination
449 repository will be deleted.
449 repository will be deleted.
450
450
451 Arguments:
451 Arguments:
452
452
453 source: repository object or URL
453 source: repository object or URL
454
454
455 dest: URL of destination repository to create (defaults to base
455 dest: URL of destination repository to create (defaults to base
456 name of source repository)
456 name of source repository)
457
457
458 pull: always pull from source repository, even in local case or if the
458 pull: always pull from source repository, even in local case or if the
459 server prefers streaming
459 server prefers streaming
460
460
461 stream: stream raw data uncompressed from repository (fast over
461 stream: stream raw data uncompressed from repository (fast over
462 LAN, slow over WAN)
462 LAN, slow over WAN)
463
463
464 rev: revision to clone up to (implies pull=True)
464 rev: revision to clone up to (implies pull=True)
465
465
466 update: update working directory after clone completes, if
466 update: update working directory after clone completes, if
467 destination is local repository (True means update to default rev,
467 destination is local repository (True means update to default rev,
468 anything else is treated as a revision)
468 anything else is treated as a revision)
469
469
470 branch: branches to clone
470 branch: branches to clone
471
471
472 shareopts: dict of options to control auto sharing behavior. The "pool" key
472 shareopts: dict of options to control auto sharing behavior. The "pool" key
473 activates auto sharing mode and defines the directory for stores. The
473 activates auto sharing mode and defines the directory for stores. The
474 "mode" key determines how to construct the directory name of the shared
474 "mode" key determines how to construct the directory name of the shared
475 repository. "identity" means the name is derived from the node of the first
475 repository. "identity" means the name is derived from the node of the first
476 changeset in the repository. "remote" means the name is derived from the
476 changeset in the repository. "remote" means the name is derived from the
477 remote's path/URL. Defaults to "identity."
477 remote's path/URL. Defaults to "identity."
478 """
478 """
479
479
480 if isinstance(source, bytes):
480 if isinstance(source, bytes):
481 origsource = ui.expandpath(source)
481 origsource = ui.expandpath(source)
482 source, branch = parseurl(origsource, branch)
482 source, branch = parseurl(origsource, branch)
483 srcpeer = peer(ui, peeropts, source)
483 srcpeer = peer(ui, peeropts, source)
484 else:
484 else:
485 srcpeer = source.peer() # in case we were called with a localrepo
485 srcpeer = source.peer() # in case we were called with a localrepo
486 branch = (None, branch or [])
486 branch = (None, branch or [])
487 origsource = source = srcpeer.url()
487 origsource = source = srcpeer.url()
488 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
488 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
489
489
490 if dest is None:
490 if dest is None:
491 dest = defaultdest(source)
491 dest = defaultdest(source)
492 if dest:
492 if dest:
493 ui.status(_("destination directory: %s\n") % dest)
493 ui.status(_("destination directory: %s\n") % dest)
494 else:
494 else:
495 dest = ui.expandpath(dest)
495 dest = ui.expandpath(dest)
496
496
497 dest = util.urllocalpath(dest)
497 dest = util.urllocalpath(dest)
498 source = util.urllocalpath(source)
498 source = util.urllocalpath(source)
499
499
500 if not dest:
500 if not dest:
501 raise error.Abort(_("empty destination path is not valid"))
501 raise error.Abort(_("empty destination path is not valid"))
502
502
503 destvfs = vfsmod.vfs(dest, expandpath=True)
503 destvfs = vfsmod.vfs(dest, expandpath=True)
504 if destvfs.lexists():
504 if destvfs.lexists():
505 if not destvfs.isdir():
505 if not destvfs.isdir():
506 raise error.Abort(_("destination '%s' already exists") % dest)
506 raise error.Abort(_("destination '%s' already exists") % dest)
507 elif destvfs.listdir():
507 elif destvfs.listdir():
508 raise error.Abort(_("destination '%s' is not empty") % dest)
508 raise error.Abort(_("destination '%s' is not empty") % dest)
509
509
510 shareopts = shareopts or {}
510 shareopts = shareopts or {}
511 sharepool = shareopts.get('pool')
511 sharepool = shareopts.get('pool')
512 sharenamemode = shareopts.get('mode')
512 sharenamemode = shareopts.get('mode')
513 if sharepool and islocal(dest):
513 if sharepool and islocal(dest):
514 sharepath = None
514 sharepath = None
515 if sharenamemode == 'identity':
515 if sharenamemode == 'identity':
516 # Resolve the name from the initial changeset in the remote
516 # Resolve the name from the initial changeset in the remote
517 # repository. This returns nullid when the remote is empty. It
517 # repository. This returns nullid when the remote is empty. It
518 # raises RepoLookupError if revision 0 is filtered or otherwise
518 # raises RepoLookupError if revision 0 is filtered or otherwise
519 # not available. If we fail to resolve, sharing is not enabled.
519 # not available. If we fail to resolve, sharing is not enabled.
520 try:
520 try:
521 rootnode = srcpeer.lookup('0')
521 rootnode = srcpeer.lookup('0')
522 if rootnode != node.nullid:
522 if rootnode != node.nullid:
523 sharepath = os.path.join(sharepool, node.hex(rootnode))
523 sharepath = os.path.join(sharepool, node.hex(rootnode))
524 else:
524 else:
525 ui.status(_('(not using pooled storage: '
525 ui.status(_('(not using pooled storage: '
526 'remote appears to be empty)\n'))
526 'remote appears to be empty)\n'))
527 except error.RepoLookupError:
527 except error.RepoLookupError:
528 ui.status(_('(not using pooled storage: '
528 ui.status(_('(not using pooled storage: '
529 'unable to resolve identity of remote)\n'))
529 'unable to resolve identity of remote)\n'))
530 elif sharenamemode == 'remote':
530 elif sharenamemode == 'remote':
531 sharepath = os.path.join(
531 sharepath = os.path.join(
532 sharepool, hashlib.sha1(source).hexdigest())
532 sharepool, hashlib.sha1(source).hexdigest())
533 else:
533 else:
534 raise error.Abort(_('unknown share naming mode: %s') %
534 raise error.Abort(_('unknown share naming mode: %s') %
535 sharenamemode)
535 sharenamemode)
536
536
537 if sharepath:
537 if sharepath:
538 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
538 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
539 dest, pull=pull, rev=rev, update=update,
539 dest, pull=pull, rev=rev, update=update,
540 stream=stream)
540 stream=stream)
541
541
542 srclock = destlock = cleandir = None
542 srclock = destlock = cleandir = None
543 srcrepo = srcpeer.local()
543 srcrepo = srcpeer.local()
544 try:
544 try:
545 abspath = origsource
545 abspath = origsource
546 if islocal(origsource):
546 if islocal(origsource):
547 abspath = os.path.abspath(util.urllocalpath(origsource))
547 abspath = os.path.abspath(util.urllocalpath(origsource))
548
548
549 if islocal(dest):
549 if islocal(dest):
550 cleandir = dest
550 cleandir = dest
551
551
552 copy = False
552 copy = False
553 if (srcrepo and srcrepo.cancopy() and islocal(dest)
553 if (srcrepo and srcrepo.cancopy() and islocal(dest)
554 and not phases.hassecret(srcrepo)):
554 and not phases.hassecret(srcrepo)):
555 copy = not pull and not rev
555 copy = not pull and not rev
556
556
557 if copy:
557 if copy:
558 try:
558 try:
559 # we use a lock here because if we race with commit, we
559 # we use a lock here because if we race with commit, we
560 # can end up with extra data in the cloned revlogs that's
560 # can end up with extra data in the cloned revlogs that's
561 # not pointed to by changesets, thus causing verify to
561 # not pointed to by changesets, thus causing verify to
562 # fail
562 # fail
563 srclock = srcrepo.lock(wait=False)
563 srclock = srcrepo.lock(wait=False)
564 except error.LockError:
564 except error.LockError:
565 copy = False
565 copy = False
566
566
567 if copy:
567 if copy:
568 srcrepo.hook('preoutgoing', throw=True, source='clone')
568 srcrepo.hook('preoutgoing', throw=True, source='clone')
569 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
569 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
570 if not os.path.exists(dest):
570 if not os.path.exists(dest):
571 os.mkdir(dest)
571 os.mkdir(dest)
572 else:
572 else:
573 # only clean up directories we create ourselves
573 # only clean up directories we create ourselves
574 cleandir = hgdir
574 cleandir = hgdir
575 try:
575 try:
576 destpath = hgdir
576 destpath = hgdir
577 util.makedir(destpath, notindexed=True)
577 util.makedir(destpath, notindexed=True)
578 except OSError as inst:
578 except OSError as inst:
579 if inst.errno == errno.EEXIST:
579 if inst.errno == errno.EEXIST:
580 cleandir = None
580 cleandir = None
581 raise error.Abort(_("destination '%s' already exists")
581 raise error.Abort(_("destination '%s' already exists")
582 % dest)
582 % dest)
583 raise
583 raise
584
584
585 destlock = copystore(ui, srcrepo, destpath)
585 destlock = copystore(ui, srcrepo, destpath)
586 # copy bookmarks over
586 # copy bookmarks over
587 srcbookmarks = srcrepo.vfs.join('bookmarks')
587 srcbookmarks = srcrepo.vfs.join('bookmarks')
588 dstbookmarks = os.path.join(destpath, 'bookmarks')
588 dstbookmarks = os.path.join(destpath, 'bookmarks')
589 if os.path.exists(srcbookmarks):
589 if os.path.exists(srcbookmarks):
590 util.copyfile(srcbookmarks, dstbookmarks)
590 util.copyfile(srcbookmarks, dstbookmarks)
591
591
592 dstcachedir = os.path.join(destpath, 'cache')
592 dstcachedir = os.path.join(destpath, 'cache')
593 for cache in _cachetocopy(srcrepo):
593 for cache in _cachetocopy(srcrepo):
594 _copycache(srcrepo, dstcachedir, cache)
594 _copycache(srcrepo, dstcachedir, cache)
595
595
596 # we need to re-init the repo after manually copying the data
596 # we need to re-init the repo after manually copying the data
597 # into it
597 # into it
598 destpeer = peer(srcrepo, peeropts, dest)
598 destpeer = peer(srcrepo, peeropts, dest)
599 srcrepo.hook('outgoing', source='clone',
599 srcrepo.hook('outgoing', source='clone',
600 node=node.hex(node.nullid))
600 node=node.hex(node.nullid))
601 else:
601 else:
602 try:
602 try:
603 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
603 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
604 # only pass ui when no srcrepo
604 # only pass ui when no srcrepo
605 except OSError as inst:
605 except OSError as inst:
606 if inst.errno == errno.EEXIST:
606 if inst.errno == errno.EEXIST:
607 cleandir = None
607 cleandir = None
608 raise error.Abort(_("destination '%s' already exists")
608 raise error.Abort(_("destination '%s' already exists")
609 % dest)
609 % dest)
610 raise
610 raise
611
611
612 revs = None
612 revs = None
613 if rev:
613 if rev:
614 if not srcpeer.capable('lookup'):
614 if not srcpeer.capable('lookup'):
615 raise error.Abort(_("src repository does not support "
615 raise error.Abort(_("src repository does not support "
616 "revision lookup and so doesn't "
616 "revision lookup and so doesn't "
617 "support clone by revision"))
617 "support clone by revision"))
618 revs = [srcpeer.lookup(r) for r in rev]
618 revs = [srcpeer.lookup(r) for r in rev]
619 checkout = revs[0]
619 checkout = revs[0]
620 local = destpeer.local()
620 local = destpeer.local()
621 if local:
621 if local:
622 if not stream:
622 if not stream:
623 if pull:
623 if pull:
624 stream = False
624 stream = False
625 else:
625 else:
626 stream = None
626 stream = None
627 # internal config: ui.quietbookmarkmove
627 # internal config: ui.quietbookmarkmove
628 overrides = {('ui', 'quietbookmarkmove'): True}
628 overrides = {('ui', 'quietbookmarkmove'): True}
629 with local.ui.configoverride(overrides, 'clone'):
629 with local.ui.configoverride(overrides, 'clone'):
630 exchange.pull(local, srcpeer, revs,
630 exchange.pull(local, srcpeer, revs,
631 streamclonerequested=stream)
631 streamclonerequested=stream)
632 elif srcrepo:
632 elif srcrepo:
633 exchange.push(srcrepo, destpeer, revs=revs,
633 exchange.push(srcrepo, destpeer, revs=revs,
634 bookmarks=srcrepo._bookmarks.keys())
634 bookmarks=srcrepo._bookmarks.keys())
635 else:
635 else:
636 raise error.Abort(_("clone from remote to remote not supported")
636 raise error.Abort(_("clone from remote to remote not supported")
637 )
637 )
638
638
639 cleandir = None
639 cleandir = None
640
640
641 destrepo = destpeer.local()
641 destrepo = destpeer.local()
642 if destrepo:
642 if destrepo:
643 template = uimod.samplehgrcs['cloned']
643 template = uimod.samplehgrcs['cloned']
644 fp = destrepo.vfs("hgrc", "w", text=True)
644 fp = destrepo.vfs("hgrc", "w", text=True)
645 u = util.url(abspath)
645 u = util.url(abspath)
646 u.passwd = None
646 u.passwd = None
647 defaulturl = str(u)
647 defaulturl = str(u)
648 fp.write(template % defaulturl)
648 fp.write(template % defaulturl)
649 fp.close()
649 fp.close()
650
650
651 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
651 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
652
652
653 if update:
653 if update:
654 if update is not True:
654 if update is not True:
655 checkout = srcpeer.lookup(update)
655 checkout = srcpeer.lookup(update)
656 uprev = None
656 uprev = None
657 status = None
657 status = None
658 if checkout is not None:
658 if checkout is not None:
659 try:
659 try:
660 uprev = destrepo.lookup(checkout)
660 uprev = destrepo.lookup(checkout)
661 except error.RepoLookupError:
661 except error.RepoLookupError:
662 if update is not True:
662 if update is not True:
663 try:
663 try:
664 uprev = destrepo.lookup(update)
664 uprev = destrepo.lookup(update)
665 except error.RepoLookupError:
665 except error.RepoLookupError:
666 pass
666 pass
667 if uprev is None:
667 if uprev is None:
668 try:
668 try:
669 uprev = destrepo._bookmarks['@']
669 uprev = destrepo._bookmarks['@']
670 update = '@'
670 update = '@'
671 bn = destrepo[uprev].branch()
671 bn = destrepo[uprev].branch()
672 if bn == 'default':
672 if bn == 'default':
673 status = _("updating to bookmark @\n")
673 status = _("updating to bookmark @\n")
674 else:
674 else:
675 status = (_("updating to bookmark @ on branch %s\n")
675 status = (_("updating to bookmark @ on branch %s\n")
676 % bn)
676 % bn)
677 except KeyError:
677 except KeyError:
678 try:
678 try:
679 uprev = destrepo.branchtip('default')
679 uprev = destrepo.branchtip('default')
680 except error.RepoLookupError:
680 except error.RepoLookupError:
681 uprev = destrepo.lookup('tip')
681 uprev = destrepo.lookup('tip')
682 if not status:
682 if not status:
683 bn = destrepo[uprev].branch()
683 bn = destrepo[uprev].branch()
684 status = _("updating to branch %s\n") % bn
684 status = _("updating to branch %s\n") % bn
685 destrepo.ui.status(status)
685 destrepo.ui.status(status)
686 _update(destrepo, uprev)
686 _update(destrepo, uprev)
687 if update in destrepo._bookmarks:
687 if update in destrepo._bookmarks:
688 bookmarks.activate(destrepo, update)
688 bookmarks.activate(destrepo, update)
689 finally:
689 finally:
690 release(srclock, destlock)
690 release(srclock, destlock)
691 if cleandir is not None:
691 if cleandir is not None:
692 shutil.rmtree(cleandir, True)
692 shutil.rmtree(cleandir, True)
693 if srcpeer is not None:
693 if srcpeer is not None:
694 srcpeer.close()
694 srcpeer.close()
695 return srcpeer, destpeer
695 return srcpeer, destpeer
696
696
697 def _showstats(repo, stats, quietempty=False):
697 def _showstats(repo, stats, quietempty=False):
698 if quietempty and not any(stats):
698 if quietempty and not any(stats):
699 return
699 return
700 repo.ui.status(_("%d files updated, %d files merged, "
700 repo.ui.status(_("%d files updated, %d files merged, "
701 "%d files removed, %d files unresolved\n") % stats)
701 "%d files removed, %d files unresolved\n") % stats)
702
702
703 def updaterepo(repo, node, overwrite, updatecheck=None):
703 def updaterepo(repo, node, overwrite, updatecheck=None):
704 """Update the working directory to node.
704 """Update the working directory to node.
705
705
706 When overwrite is set, changes are clobbered, merged else
706 When overwrite is set, changes are clobbered, merged else
707
707
708 returns stats (see pydoc mercurial.merge.applyupdates)"""
708 returns stats (see pydoc mercurial.merge.applyupdates)"""
709 return mergemod.update(repo, node, False, overwrite,
709 return mergemod.update(repo, node, False, overwrite,
710 labels=['working copy', 'destination'],
710 labels=['working copy', 'destination'],
711 updatecheck=updatecheck)
711 updatecheck=updatecheck)
712
712
713 def update(repo, node, quietempty=False, updatecheck=None):
713 def update(repo, node, quietempty=False, updatecheck=None):
714 """update the working directory to node"""
714 """update the working directory to node"""
715 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
715 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
716 _showstats(repo, stats, quietempty)
716 _showstats(repo, stats, quietempty)
717 if stats[3]:
717 if stats[3]:
718 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
718 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
719 return stats[3] > 0
719 return stats[3] > 0
720
720
721 # naming conflict in clone()
721 # naming conflict in clone()
722 _update = update
722 _update = update
723
723
724 def clean(repo, node, show_stats=True, quietempty=False):
724 def clean(repo, node, show_stats=True, quietempty=False):
725 """forcibly switch the working directory to node, clobbering changes"""
725 """forcibly switch the working directory to node, clobbering changes"""
726 stats = updaterepo(repo, node, True)
726 stats = updaterepo(repo, node, True)
727 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
727 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
728 if show_stats:
728 if show_stats:
729 _showstats(repo, stats, quietempty)
729 _showstats(repo, stats, quietempty)
730 return stats[3] > 0
730 return stats[3] > 0
731
731
732 # naming conflict in updatetotally()
732 # naming conflict in updatetotally()
733 _clean = clean
733 _clean = clean
734
734
735 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
735 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
736 """Update the working directory with extra care for non-file components
736 """Update the working directory with extra care for non-file components
737
737
738 This takes care of non-file components below:
738 This takes care of non-file components below:
739
739
740 :bookmark: might be advanced or (in)activated
740 :bookmark: might be advanced or (in)activated
741
741
742 This takes arguments below:
742 This takes arguments below:
743
743
744 :checkout: to which revision the working directory is updated
744 :checkout: to which revision the working directory is updated
745 :brev: a name, which might be a bookmark to be activated after updating
745 :brev: a name, which might be a bookmark to be activated after updating
746 :clean: whether changes in the working directory can be discarded
746 :clean: whether changes in the working directory can be discarded
747 :updatecheck: how to deal with a dirty working directory
747 :updatecheck: how to deal with a dirty working directory
748
748
749 Valid values for updatecheck are (None => linear):
749 Valid values for updatecheck are (None => linear):
750
750
751 * abort: abort if the working directory is dirty
751 * abort: abort if the working directory is dirty
752 * none: don't check (merge working directory changes into destination)
752 * none: don't check (merge working directory changes into destination)
753 * linear: check that update is linear before merging working directory
753 * linear: check that update is linear before merging working directory
754 changes into destination
754 changes into destination
755 * noconflict: check that the update does not result in file merges
755 * noconflict: check that the update does not result in file merges
756
756
757 This returns whether conflict is detected at updating or not.
757 This returns whether conflict is detected at updating or not.
758 """
758 """
759 if updatecheck is None:
759 if updatecheck is None:
760 updatecheck = ui.config('experimental', 'updatecheck')
760 updatecheck = ui.config('experimental', 'updatecheck')
761 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
761 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
762 # If not configured, or invalid value configured
762 # If not configured, or invalid value configured
763 updatecheck = 'linear'
763 updatecheck = 'linear'
764 with repo.wlock():
764 with repo.wlock():
765 movemarkfrom = None
765 movemarkfrom = None
766 warndest = False
766 warndest = False
767 if checkout is None:
767 if checkout is None:
768 updata = destutil.destupdate(repo, clean=clean)
768 updata = destutil.destupdate(repo, clean=clean)
769 checkout, movemarkfrom, brev = updata
769 checkout, movemarkfrom, brev = updata
770 warndest = True
770 warndest = True
771
771
772 if clean:
772 if clean:
773 ret = _clean(repo, checkout)
773 ret = _clean(repo, checkout)
774 else:
774 else:
775 if updatecheck == 'abort':
775 if updatecheck == 'abort':
776 cmdutil.bailifchanged(repo, merge=False)
776 cmdutil.bailifchanged(repo, merge=False)
777 updatecheck = 'none'
777 updatecheck = 'none'
778 ret = _update(repo, checkout, updatecheck=updatecheck)
778 ret = _update(repo, checkout, updatecheck=updatecheck)
779
779
780 if not ret and movemarkfrom:
780 if not ret and movemarkfrom:
781 if movemarkfrom == repo['.'].node():
781 if movemarkfrom == repo['.'].node():
782 pass # no-op update
782 pass # no-op update
783 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
783 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
784 b = ui.label(repo._activebookmark, 'bookmarks.active')
784 b = ui.label(repo._activebookmark, 'bookmarks.active')
785 ui.status(_("updating bookmark %s\n") % b)
785 ui.status(_("updating bookmark %s\n") % b)
786 else:
786 else:
787 # this can happen with a non-linear update
787 # this can happen with a non-linear update
788 b = ui.label(repo._activebookmark, 'bookmarks')
788 b = ui.label(repo._activebookmark, 'bookmarks')
789 ui.status(_("(leaving bookmark %s)\n") % b)
789 ui.status(_("(leaving bookmark %s)\n") % b)
790 bookmarks.deactivate(repo)
790 bookmarks.deactivate(repo)
791 elif brev in repo._bookmarks:
791 elif brev in repo._bookmarks:
792 if brev != repo._activebookmark:
792 if brev != repo._activebookmark:
793 b = ui.label(brev, 'bookmarks.active')
793 b = ui.label(brev, 'bookmarks.active')
794 ui.status(_("(activating bookmark %s)\n") % b)
794 ui.status(_("(activating bookmark %s)\n") % b)
795 bookmarks.activate(repo, brev)
795 bookmarks.activate(repo, brev)
796 elif brev:
796 elif brev:
797 if repo._activebookmark:
797 if repo._activebookmark:
798 b = ui.label(repo._activebookmark, 'bookmarks')
798 b = ui.label(repo._activebookmark, 'bookmarks')
799 ui.status(_("(leaving bookmark %s)\n") % b)
799 ui.status(_("(leaving bookmark %s)\n") % b)
800 bookmarks.deactivate(repo)
800 bookmarks.deactivate(repo)
801
801
802 if warndest:
802 if warndest:
803 destutil.statusotherdests(ui, repo)
803 destutil.statusotherdests(ui, repo)
804
804
805 return ret
805 return ret
806
806
807 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
807 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
808 """Branch merge with node, resolving changes. Return true if any
808 """Branch merge with node, resolving changes. Return true if any
809 unresolved conflicts."""
809 unresolved conflicts."""
810 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
810 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
811 labels=labels)
811 labels=labels)
812 _showstats(repo, stats)
812 _showstats(repo, stats)
813 if stats[3]:
813 if stats[3]:
814 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
814 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
815 "or 'hg update -C .' to abandon\n"))
815 "or 'hg update -C .' to abandon\n"))
816 elif remind:
816 elif remind:
817 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
817 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
818 return stats[3] > 0
818 return stats[3] > 0
819
819
820 def _incoming(displaychlist, subreporecurse, ui, repo, source,
820 def _incoming(displaychlist, subreporecurse, ui, repo, source,
821 opts, buffered=False):
821 opts, buffered=False):
822 """
822 """
823 Helper for incoming / gincoming.
823 Helper for incoming / gincoming.
824 displaychlist gets called with
824 displaychlist gets called with
825 (remoterepo, incomingchangesetlist, displayer) parameters,
825 (remoterepo, incomingchangesetlist, displayer) parameters,
826 and is supposed to contain only code that can't be unified.
826 and is supposed to contain only code that can't be unified.
827 """
827 """
828 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
828 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
829 other = peer(repo, opts, source)
829 other = peer(repo, opts, source)
830 ui.status(_('comparing with %s\n') % util.hidepassword(source))
830 ui.status(_('comparing with %s\n') % util.hidepassword(source))
831 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
831 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
832
832
833 if revs:
833 if revs:
834 revs = [other.lookup(rev) for rev in revs]
834 revs = [other.lookup(rev) for rev in revs]
835 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
835 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
836 revs, opts["bundle"], opts["force"])
836 revs, opts["bundle"], opts["force"])
837 try:
837 try:
838 if not chlist:
838 if not chlist:
839 ui.status(_("no changes found\n"))
839 ui.status(_("no changes found\n"))
840 return subreporecurse()
840 return subreporecurse()
841 ui.pager('incoming')
841 ui.pager('incoming')
842 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
842 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
843 displaychlist(other, chlist, displayer)
843 displaychlist(other, chlist, displayer)
844 displayer.close()
844 displayer.close()
845 finally:
845 finally:
846 cleanupfn()
846 cleanupfn()
847 subreporecurse()
847 subreporecurse()
848 return 0 # exit code is zero since we found incoming changes
848 return 0 # exit code is zero since we found incoming changes
849
849
850 def incoming(ui, repo, source, opts):
850 def incoming(ui, repo, source, opts):
851 def subreporecurse():
851 def subreporecurse():
852 ret = 1
852 ret = 1
853 if opts.get('subrepos'):
853 if opts.get('subrepos'):
854 ctx = repo[None]
854 ctx = repo[None]
855 for subpath in sorted(ctx.substate):
855 for subpath in sorted(ctx.substate):
856 sub = ctx.sub(subpath)
856 sub = ctx.sub(subpath)
857 ret = min(ret, sub.incoming(ui, source, opts))
857 ret = min(ret, sub.incoming(ui, source, opts))
858 return ret
858 return ret
859
859
860 def display(other, chlist, displayer):
860 def display(other, chlist, displayer):
861 limit = cmdutil.loglimit(opts)
861 limit = cmdutil.loglimit(opts)
862 if opts.get('newest_first'):
862 if opts.get('newest_first'):
863 chlist.reverse()
863 chlist.reverse()
864 count = 0
864 count = 0
865 for n in chlist:
865 for n in chlist:
866 if limit is not None and count >= limit:
866 if limit is not None and count >= limit:
867 break
867 break
868 parents = [p for p in other.changelog.parents(n) if p != nullid]
868 parents = [p for p in other.changelog.parents(n) if p != nullid]
869 if opts.get('no_merges') and len(parents) == 2:
869 if opts.get('no_merges') and len(parents) == 2:
870 continue
870 continue
871 count += 1
871 count += 1
872 displayer.show(other[n])
872 displayer.show(other[n])
873 return _incoming(display, subreporecurse, ui, repo, source, opts)
873 return _incoming(display, subreporecurse, ui, repo, source, opts)
874
874
875 def _outgoing(ui, repo, dest, opts):
875 def _outgoing(ui, repo, dest, opts):
876 dest = ui.expandpath(dest or 'default-push', dest or 'default')
876 dest = ui.expandpath(dest or 'default-push', dest or 'default')
877 dest, branches = parseurl(dest, opts.get('branch'))
877 dest, branches = parseurl(dest, opts.get('branch'))
878 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
878 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
879 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
879 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
880 if revs:
880 if revs:
881 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
881 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
882
882
883 other = peer(repo, opts, dest)
883 other = peer(repo, opts, dest)
884 outgoing = discovery.findcommonoutgoing(repo, other, revs,
884 outgoing = discovery.findcommonoutgoing(repo, other, revs,
885 force=opts.get('force'))
885 force=opts.get('force'))
886 o = outgoing.missing
886 o = outgoing.missing
887 if not o:
887 if not o:
888 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
888 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
889 return o, other
889 return o, other
890
890
def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

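# Hedged usage sketch (not part of hg.py): verify() above returns whatever
# verifymod.verify() reports, folded together with the subrepo checks via
# 'or ret', so any nonzero result means at least one problem was found.
#
#     if verify(repo):
#         repo.ui.warn('repository verification reported problems\n')
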
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

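# Hedged usage sketch (not part of hg.py): remoteui() builds the ui a peer will
# use. Starting from baseui (or a plain ui copy) keeps repo-local configuration
# such as [paths] from leaking to the remote side, while ssh options and the
# auth/hostsecurity/proxy sections are copied over explicitly.
#
#     rui = remoteui(repo, {'ssh': 'ssh -C'})
#     rui.config('ui', 'ssh')        # -> 'ssh -C' (recorded with source 'copied')
#     rui.config('paths', 'default') # -> None; repo-level config was dropped
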
# Files of interest
# Used to check if the repository has changed by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
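
# Hedged usage sketch (not part of hg.py): a long-running process such as a web
# server can keep a cachedlocalrepo around and call fetch() per request; the
# repository is only reopened when one of the 'foi' files above has changed.
# 'baseui' and 'path' are hypothetical placeholders supplied by the caller.
#
#     cache = cachedlocalrepo(repository(baseui, path))
#     ...
#     repo, recreated = cache.fetch()
#     if recreated:
#         pass  # drop any state derived from the old repo instance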