clone: for local clones, copy branchcache from the right location (issue4286)...
Siddharth Agarwal
r22263:ab0c42d2 default
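The change itself is a single line in clone(): the branch cache file lives at .hg/cache/branch2, which repo.join resolves relative to the .hg directory, while the old code used repo.sjoin, which resolves relative to the store directory .hg/store, so it looked for the cache in a place where it does not exist. A minimal sketch of the difference, using hypothetical stand-in helpers rather than Mercurial's real vfs methods:

import os

# Hypothetical stand-ins for localrepository.join/sjoin, assuming the
# conventional layout where .hg holds caches and .hg/store holds revlogs.
def join(root, f):
    return os.path.join(root, '.hg', f)             # resolves under .hg/

def sjoin(root, f):
    return os.path.join(root, '.hg', 'store', f)    # resolves under .hg/store/

print(join('/repo', 'cache/branch2'))   # /repo/.hg/cache/branch2 (where branch2 really lives)
print(sjoin('/repo', 'cache/branch2'))  # /repo/.hg/store/cache/branch2 (where the old code looked)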
mercurial/hg.py
@@ -1,661 +1,661 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from lock import release
from node import hex, nullid
import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
import cmdutil, discovery
import merge as mergemod
import verify as verifymod
import errno, os, shutil

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        return revs or None, revs and revs[0] or None
    revs = revs and list(revs) or []
    if not peer.capable('branchmap'):
        if branches:
            raise util.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise util.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return str(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, str):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create)
    repo = peer.local()
    if not repo:
        raise util.Abort(_("repository '%s' is not local") %
                         (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True):
    '''create a shared repository'''

    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise util.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.opener.read('requires')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)

    default = srcrepo.ui.config('paths', 'default')
    if default:
        fp = r.opener("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise util.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise util.Abort(_("destination '%s' is not empty") % dest)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            dstcachedir = os.path.join(destpath, 'cache')
-            srcbranchcache = srcrepo.sjoin('cache/branch2')
+            srcbranchcache = srcrepo.join('cache/branch2')
            dstbranchcache = os.path.join(dstcachedir, 'branch2')
            if os.path.exists(srcbranchcache):
                if not os.path.exists(dstcachedir):
                    os.mkdir(dstcachedir)
                util.copyfile(srcbranchcache, dstbranchcache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise util.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            if destpeer.local():
                destpeer.local().clone(srcpeer, heads=revs, stream=stream)
            elif srcrepo:
                srcrepo.push(destpeer, revs=revs)
            else:
                raise util.Abort(_("clone from remote to remote not supported"))

        cleandir = None

        # clone all bookmarks except divergent ones
        destrepo = destpeer.local()
        if destrepo and srcpeer.capable("pushkey"):
            rb = srcpeer.listkeys('bookmarks')
            marks = destrepo._bookmarks
            for k, n in rb.iteritems():
                try:
                    m = destrepo.lookup(n)
                    marks[k] = m
                except error.RepoLookupError:
                    pass
            if rb:
                marks.write()
        elif srcrepo and destpeer.capable("pushkey"):
            for k, n in srcrepo._bookmarks.iteritems():
                destpeer.pushkey('bookmarks', k, '', hex(n))

        if destrepo:
            fp = destrepo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write("default = %s\n" % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.setcurrent(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats):
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite, None,
                           labels=['working copy', 'destination'])

def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats)
    return stats[3] > 0

def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate"""
    return mergemod.update(repo, node, False, True, choose)[3] > 0

def verify(repo):
    """verify the consistency of a repository"""
    return verifymod.verify(repo)

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst