hg: use vfs functions in destination repository with share...
Author: Chinmay Joshi
Changeset: r21800:219af152 (branch: default)
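The change itself is small: in share(), the destination working directory is now reached through a scmutil.vfs bound to dest rather than through raw os.path.isdir/os.mkdir calls, and the new repository is opened from destwvfs.base. A minimal sketch of that pattern, taken out of the patch, follows; the helper name ensure_destination is illustrative and not part of Mercurial, and the sketch assumes a Mercurial (~3.0) installation where mercurial.scmutil.vfs is available.

# Illustrative sketch only, not part of the changeset.
from mercurial import scmutil

def ensure_destination(dest):
    # Before this patch, share() did the equivalent of:
    #     if not os.path.isdir(root):
    #         os.mkdir(root)
    # Afterwards the same check and creation go through a vfs bound to the
    # destination, keeping filesystem access behind Mercurial's abstraction.
    destwvfs = scmutil.vfs(dest, realpath=True)
    if not destwvfs.isdir():
        destwvfs.mkdir()
    # .base is the resolved destination root, later passed to repository().
    return destwvfs.base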
mercurial/hg.py
@@ -1,659 +1,660 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from lock import release
from node import hex, nullid
import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
import cmdutil, discovery
import merge as mergemod
import verify as verifymod
import errno, os, shutil

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        return revs or None, revs and revs[0] or None
    revs = revs and list(revs) or []
    if not peer.capable('branchmap'):
        if branches:
            raise util.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise util.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return str(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, str):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create)
    repo = peer.local()
    if not repo:
        raise util.Abort(_("repository '%s' is not local") %
                         (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True):
    '''create a shared repository'''

    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    root = os.path.realpath(dest)
    roothg = os.path.join(root, '.hg')
+    destwvfs = scmutil.vfs(dest, realpath=True)

    if os.path.exists(roothg):
        raise util.Abort(_('destination already exists'))

-    if not os.path.isdir(root):
-        os.mkdir(root)
+    if not destwvfs.isdir():
+        destwvfs.mkdir()
    util.makedir(roothg, notindexed=True)

    requirements = ''
    try:
        requirements = srcrepo.opener.read('requires')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    util.writefile(os.path.join(roothg, 'requires'), requirements)
    util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)

-    r = repository(ui, root)
+    r = repository(ui, destwvfs.base)

    default = srcrepo.ui.config('paths', 'default')
    if default:
        fp = r.opener("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise util.Abort(_("empty destination path is not valid"))
    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            dstcachedir = os.path.join(destpath, 'cache')
            srcbranchcache = srcrepo.sjoin('cache/branch2')
            dstbranchcache = os.path.join(dstcachedir, 'branch2')
            if os.path.exists(srcbranchcache):
                if not os.path.exists(dstcachedir):
                    os.mkdir(dstcachedir)
                util.copyfile(srcbranchcache, dstbranchcache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise util.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            if destpeer.local():
                destpeer.local().clone(srcpeer, heads=revs, stream=stream)
            elif srcrepo:
                srcrepo.push(destpeer, revs=revs)
            else:
                raise util.Abort(_("clone from remote to remote not supported"))

        cleandir = None

        # clone all bookmarks except divergent ones
        destrepo = destpeer.local()
        if destrepo and srcpeer.capable("pushkey"):
            rb = srcpeer.listkeys('bookmarks')
            marks = destrepo._bookmarks
            for k, n in rb.iteritems():
                try:
                    m = destrepo.lookup(n)
                    marks[k] = m
                except error.RepoLookupError:
                    pass
            if rb:
                marks.write()
        elif srcrepo and destpeer.capable("pushkey"):
            for k, n in srcrepo._bookmarks.iteritems():
                destpeer.pushkey('bookmarks', k, '', hex(n))

        if destrepo:
            fp = destrepo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write("default = %s\n" % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                       % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.setcurrent(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats):
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite, None,
                           labels=['working copy', 'destination'])

def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats)
    return stats[3] > 0

def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate"""
    return mergemod.update(repo, node, False, True, choose)[3] > 0

def verify(repo):
    """verify the consistency of a repository"""
    return verifymod.verify(repo)

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst
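For context, a hedged usage sketch of the share() function this changeset touches; the ui construction and the repository paths below are illustrative assumptions, not taken from the page above.

# Hypothetical driver code, assuming a Mercurial ~3.0 installation and an
# existing repository at /tmp/main; roughly what the share extension's
# 'hg share' command does with these arguments.
from mercurial import ui as uimod, hg

u = uimod.ui()
# Create /tmp/main-share as a new working directory that shares /tmp/main's
# store instead of copying it, then update its working copy.
hg.share(u, '/tmp/main', '/tmp/main-share', update=True)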