clone: add support for storing remotenames while cloning...
Pulkit Goyal
r35332:773a9a06 default
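A minimal usage sketch (an assumption drawn from the configuration switch visible in the diff below, not text from the original changeset description): the new behaviour only runs when the experimental.remotenames option is enabled for the user performing the clone, for example:

    # enable in the cloning user's hgrc
    [experimental]
    remotenames = True

    # or for a single invocation (SOURCE and DEST are placeholders)
    $ hg clone --config experimental.remotenames=True SOURCE DEST

With the option set, clone() calls remotenames.pullremotenames(destrepo, srcpeer) right after the destination repository's default path is written, as the hunk below shows.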
@@ -1,1103 +1,1107 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import shutil

 from .i18n import _
 from .node import nullid

 from . import (
     bookmarks,
     bundlerepo,
     cmdutil,
     destutil,
     discovery,
     error,
     exchange,
     extensions,
     httppeer,
     localrepo,
     lock,
     merge as mergemod,
     node,
     phases,
+    remotenames,
     repoview,
     scmutil,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )

 release = lock.release

 # shared features
 sharedbookmarks = 'bookmarks'

 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)

 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
         if util.safehasattr(revs, 'first'):
             y = revs.first()
         elif revs:
             y = revs[0]
         else:
             y = None
         return x, y
     if revs:
         revs = list(revs)
     else:
         revs = []

     if not peer.capable('branchmap'):
         if branches:
             raise error.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = peer.branchmap()

     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise error.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False

     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]

 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''

     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return bytes(u), (branch, branches or [])

 schemes = {
     'bundle': bundlerepo,
     'union': unionrepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }

 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
         if not util.safehasattr(thing, 'instance'):
             raise
         return thing

 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 def openpath(ui, path):
     '''open path with open if local, url.open if remote'''
     pathurl = util.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
         return util.posixfile(pathurl.localpath(), 'rb')
     else:
         return url.open(ui, path)

 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []

 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create)
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
     for name, module in extensions.extensions(ui):
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj

 def repository(ui, path='', create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
     repo = peer.local()
     if not repo:
         raise error.Abort(_("repository '%s' is not local") %
                           (path or peer.url()))
     return repo.filtered('visible')

 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create).peer()

 def defaultdest(source):
     '''return default destination of clone if none is given

     >>> defaultdest(b'foo')
     'foo'
     >>> defaultdest(b'/foo/bar')
     'bar'
     >>> defaultdest(b'/')
     ''
     >>> defaultdest(b'')
     ''
     >>> defaultdest(b'http://example.org/')
     ''
     >>> defaultdest(b'http://example.org/foo/')
     'foo'
     '''
     path = util.url(source).path
     if not path:
         return ''
     return os.path.basename(os.path.normpath(path))

 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
           relative=False):
     '''create a shared repository'''

     if not islocal(source):
         raise error.Abort(_('can only share local repositories'))

     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)

     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         origsource = source = srcrepo.url()
         checkout = None

     sharedpath = srcrepo.sharedpath # if our source is already sharing

     destwvfs = vfsmod.vfs(dest, realpath=True)
     destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

     if destvfs.lexists():
         raise error.Abort(_('destination already exists'))

     if not destwvfs.isdir():
         destwvfs.mkdir()
     destvfs.makedir()

     requirements = ''
     try:
         requirements = srcrepo.vfs.read('requires')
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise

     if relative:
         try:
             sharedpath = os.path.relpath(sharedpath, destvfs.base)
             requirements += 'relshared\n'
         except (IOError, ValueError) as e:
             # ValueError is raised on Windows if the drive letters differ on
             # each path
             raise error.Abort(_('cannot calculate relative path'),
                               hint=str(e))
     else:
         requirements += 'shared\n'

     destvfs.write('requires', requirements)
     destvfs.write('sharedpath', sharedpath)

     r = repository(ui, destwvfs.base)
     postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
     _postshareupdate(r, update, checkout=checkout)
     return r

 def unshare(ui, repo):
     """convert a shared repository to a normal one

     Copy the store data to the repo and remove the sharedpath data.
     """

     destlock = lock = None
     lock = repo.lock()
     try:
         # we use locks here because if we race with commit, we
         # can end up with extra data in the cloned revlogs that's
         # not pointed to by changesets, thus causing verify to
         # fail

         destlock = copystore(ui, repo, repo.path)

         sharefile = repo.vfs.join('sharedpath')
         util.rename(sharefile, sharefile + '.old')

         repo.requirements.discard('shared')
         repo.requirements.discard('relshared')
         repo._writerequirements()
     finally:
         destlock and destlock.release()
         lock and lock.release()

     # update store, spath, svfs and sjoin of repo
     repo.unfiltered().__init__(repo.baseui, repo.root)

     # TODO: figure out how to access subrepos that exist, but were previously
     # removed from .hgsub
     c = repo['.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()

 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
     """Called after a new shared repo is created.

     The new repo only has a requirements file and pointer to the source.
     This function configures additional shared data.

     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
     default = defaultpath or sourcerepo.ui.config('paths', 'default')
     if default:
         fp = destrepo.vfs("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % default)
         fp.close()

     with destrepo.wlock():
         if bookmarks:
             fp = destrepo.vfs('shared', 'w')
             fp.write(sharedbookmarks + '\n')
             fp.close()

 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.

     ``update`` can be a boolean or a revision to update to.
     """
     if not update:
         return

     repo.ui.status(_("updating working directory\n"))
     if update is not True:
         checkout = update
     for test in (checkout, 'default', 'tip'):
         if test is None:
             continue
         try:
             uprev = repo.lookup(test)
             break
         except error.RepoLookupError:
             continue
     _update(repo, uprev)

 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath

     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         closetopic = [None]
         def prog(topic, pos):
             if pos is None:
                 closetopic[0] = topic
             else:
                 ui.progress(topic, pos + num)
         srcpublishing = srcrepo.publishing()
         srcvfs = vfsmod.vfs(srcrepo.sharedpath)
         dstvfs = vfsmod.vfs(destpath)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             dstbase = os.path.dirname(f)
             if dstbase and not dstvfs.exists(dstbase):
                 dstvfs.mkdir(dstbase)
             if srcvfs.exists(f):
                 if f.endswith('data'):
                     # 'dstbase' may be empty (e.g. revlog format 0)
                     lockfile = os.path.join(dstbase, "lock")
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(dstvfs, lockfile)
                 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                              hardlink, progress=prog)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         else:
             ui.debug("copied %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         return destlock
     except: # re-raises
         release(destlock)
         raise

 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                    rev=None, update=True, stream=False):
     """Perform a clone using a shared repo.

     The store for the repository will be located at <sharepath>/.hg. The
     specified revisions will be cloned or pulled from "source". A shared repo
     will be created at "dest" and a working copy will be created if "update" is
     True.
     """
     revs = None
     if rev:
         if not srcpeer.capable('lookup'):
             raise error.Abort(_("src repository does not support "
                                 "revision lookup and so doesn't "
                                 "support clone by revision"))
         revs = [srcpeer.lookup(r) for r in rev]

     # Obtain a lock before checking for or cloning the pooled repo otherwise
     # 2 clients may race creating or populating it.
     pooldir = os.path.dirname(sharepath)
     # lock class requires the directory to exist.
     try:
         util.makedir(pooldir, False)
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise

     poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)

     with lock.lock(poolvfs, '%s.lock' % basename):
         if os.path.exists(sharepath):
             ui.status(_('(sharing from existing pooled repository %s)\n') %
                       basename)
         else:
             ui.status(_('(sharing from new pooled repository %s)\n') % basename)
             # Always use pull mode because hardlinks in share mode don't work
             # well. Never update because working copies aren't necessary in
             # share mode.
             clone(ui, peeropts, source, dest=sharepath, pull=True,
                   rev=rev, update=False, stream=stream)

     # Resolve the value to put in [paths] section for the source.
     if islocal(source):
         defaultpath = os.path.abspath(util.urllocalpath(source))
     else:
         defaultpath = source

     sharerepo = repository(ui, path=sharepath)
     share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
           defaultpath=defaultpath)

     # We need to perform a pull against the dest repo to fetch bookmarks
     # and other non-store data that isn't shared by default. In the case of
     # non-existing shared repo, this means we pull from the remote twice. This
     # is a bit weird. But at the time it was implemented, there wasn't an easy
     # way to pull just non-changegroup data.
     destrepo = repository(ui, path=dest)
     exchange.pull(destrepo, srcpeer, heads=revs)

     _postshareupdate(destrepo, update)

     return srcpeer, peer(ui, peeropts, dest)

 # Recomputing branch cache might be slow on big repos,
 # so just copy it
 def _copycache(srcrepo, dstcachedir, fname):
     """copy a cache from srcrepo to destcachedir (if it exists)"""
     srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
     dstbranchcache = os.path.join(dstcachedir, fname)
     if os.path.exists(srcbranchcache):
         if not os.path.exists(dstcachedir):
             os.mkdir(dstcachedir)
         util.copyfile(srcbranchcache, dstbranchcache)

 def _cachetocopy(srcrepo):
     """return the list of cache file valuable to copy during a clone"""
     # In local clones we're copying all nodes, not just served
     # ones. Therefore copy all branch caches over.
     cachefiles = ['branch2']
     cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
     cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
     cachefiles += ['tags2']
     cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
     cachefiles += ['hgtagsfnodes1']
     return cachefiles

 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
           update=True, stream=False, branch=None, shareopts=None):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository peers, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case or if the
     server prefers streaming

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)

     branch: branches to clone

     shareopts: dict of options to control auto sharing behavior. The "pool" key
     activates auto sharing mode and defines the directory for stores. The
     "mode" key determines how to construct the directory name of the shared
     repository. "identity" means the name is derived from the node of the first
     changeset in the repository. "remote" means the name is derived from the
     remote's path/URL. Defaults to "identity."
     """

     if isinstance(source, bytes):
         origsource = ui.expandpath(source)
         source, branch = parseurl(origsource, branch)
         srcpeer = peer(ui, peeropts, source)
     else:
         srcpeer = source.peer() # in case we were called with a localrepo
         branch = (None, branch or [])
         origsource = source = srcpeer.url()
     rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

     if dest is None:
         dest = defaultdest(source)
         if dest:
             ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)

     dest = util.urllocalpath(dest)
     source = util.urllocalpath(source)

     if not dest:
         raise error.Abort(_("empty destination path is not valid"))

     destvfs = vfsmod.vfs(dest, expandpath=True)
     if destvfs.lexists():
         if not destvfs.isdir():
             raise error.Abort(_("destination '%s' already exists") % dest)
         elif destvfs.listdir():
             raise error.Abort(_("destination '%s' is not empty") % dest)

     shareopts = shareopts or {}
     sharepool = shareopts.get('pool')
     sharenamemode = shareopts.get('mode')
     if sharepool and islocal(dest):
         sharepath = None
         if sharenamemode == 'identity':
             # Resolve the name from the initial changeset in the remote
             # repository. This returns nullid when the remote is empty. It
             # raises RepoLookupError if revision 0 is filtered or otherwise
             # not available. If we fail to resolve, sharing is not enabled.
             try:
                 rootnode = srcpeer.lookup('0')
                 if rootnode != node.nullid:
                     sharepath = os.path.join(sharepool, node.hex(rootnode))
                 else:
                     ui.status(_('(not using pooled storage: '
                                 'remote appears to be empty)\n'))
             except error.RepoLookupError:
                 ui.status(_('(not using pooled storage: '
                             'unable to resolve identity of remote)\n'))
         elif sharenamemode == 'remote':
             sharepath = os.path.join(
                 sharepool, hashlib.sha1(source).hexdigest())
         else:
             raise error.Abort(_('unknown share naming mode: %s') %
                               sharenamemode)

         if sharepath:
             return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                   dest, pull=pull, rev=rev, update=update,
                                   stream=stream)

     srclock = destlock = cleandir = None
     srcrepo = srcpeer.local()
     try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))

         if islocal(dest):
             cleandir = dest

         copy = False
         if (srcrepo and srcrepo.cancopy() and islocal(dest)
             and not phases.hassecret(srcrepo)):
             copy = not pull and not rev

         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False

         if copy:
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
                 cleandir = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
                     raise error.Abort(_("destination '%s' already exists")
                                       % dest)
                 raise

             destlock = copystore(ui, srcrepo, destpath)
             # copy bookmarks over
             srcbookmarks = srcrepo.vfs.join('bookmarks')
             dstbookmarks = os.path.join(destpath, 'bookmarks')
             if os.path.exists(srcbookmarks):
                 util.copyfile(srcbookmarks, dstbookmarks)

             dstcachedir = os.path.join(destpath, 'cache')
             for cache in _cachetocopy(srcrepo):
                 _copycache(srcrepo, dstcachedir, cache)

             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
             srcrepo.hook('outgoing', source='clone',
                          node=node.hex(node.nullid))
         else:
             try:
                 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                 # only pass ui when no srcrepo
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
                     raise error.Abort(_("destination '%s' already exists")
                                       % dest)
                 raise

             revs = None
             if rev:
                 if not srcpeer.capable('lookup'):
                     raise error.Abort(_("src repository does not support "
                                         "revision lookup and so doesn't "
                                         "support clone by revision"))
                 revs = [srcpeer.lookup(r) for r in rev]
                 checkout = revs[0]
             local = destpeer.local()
             if local:
                 if not stream:
                     if pull:
                         stream = False
                     else:
                         stream = None
                 # internal config: ui.quietbookmarkmove
                 overrides = {('ui', 'quietbookmarkmove'): True}
                 with local.ui.configoverride(overrides, 'clone'):
                     exchange.pull(local, srcpeer, revs,
                                   streamclonerequested=stream)
             elif srcrepo:
                 exchange.push(srcrepo, destpeer, revs=revs,
                               bookmarks=srcrepo._bookmarks.keys())
             else:
                 raise error.Abort(_("clone from remote to remote not supported")
                                   )

         cleandir = None

         destrepo = destpeer.local()
         if destrepo:
             template = uimod.samplehgrcs['cloned']
             fp = destrepo.vfs("hgrc", "wb")
             u = util.url(abspath)
             u.passwd = None
             defaulturl = bytes(u)
             fp.write(util.tonativeeol(template % defaulturl))
             fp.close()

             destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

+            if ui.configbool('experimental', 'remotenames'):
+                remotenames.pullremotenames(destrepo, srcpeer)
+
             if update:
                 if update is not True:
                     checkout = srcpeer.lookup(update)
                 uprev = None
                 status = None
                 if checkout is not None:
                     try:
                         uprev = destrepo.lookup(checkout)
                     except error.RepoLookupError:
                         if update is not True:
                             try:
                                 uprev = destrepo.lookup(update)
                             except error.RepoLookupError:
                                 pass
                 if uprev is None:
                     try:
                         uprev = destrepo._bookmarks['@']
                         update = '@'
                         bn = destrepo[uprev].branch()
                         if bn == 'default':
                             status = _("updating to bookmark @\n")
                         else:
                             status = (_("updating to bookmark @ on branch %s\n")
                                       % bn)
                     except KeyError:
                         try:
                             uprev = destrepo.branchtip('default')
                         except error.RepoLookupError:
                             uprev = destrepo.lookup('tip')
                 if not status:
                     bn = destrepo[uprev].branch()
                     status = _("updating to branch %s\n") % bn
                 destrepo.ui.status(status)
                 _update(destrepo, uprev)
                 if update in destrepo._bookmarks:
                     bookmarks.activate(destrepo, update)
     finally:
         release(srclock, destlock)
         if cleandir is not None:
             shutil.rmtree(cleandir, True)
         if srcpeer is not None:
             srcpeer.close()
     return srcpeer, destpeer

 def _showstats(repo, stats, quietempty=False):
     if quietempty and not any(stats):
         return
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)

 def updaterepo(repo, node, overwrite, updatecheck=None):
     """Update the working directory to node.

     When overwrite is set, changes are clobbered, merged else

     returns stats (see pydoc mercurial.merge.applyupdates)"""
     return mergemod.update(repo, node, False, overwrite,
                            labels=['working copy', 'destination'],
                            updatecheck=updatecheck)

 def update(repo, node, quietempty=False, updatecheck=None):
     """update the working directory to node"""
     stats = updaterepo(repo, node, False, updatecheck=updatecheck)
     _showstats(repo, stats, quietempty)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0

 # naming conflict in clone()
 _update = update

 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
     repo.vfs.unlinkpath('graftstate', ignoremissing=True)
     if show_stats:
         _showstats(repo, stats, quietempty)
     return stats[3] > 0

 # naming conflict in updatetotally()
 _clean = clean

 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
     """Update the working directory with extra care for non-file components

     This takes care of non-file components below:

     :bookmark: might be advanced or (in)activated

     This takes arguments below:

     :checkout: to which revision the working directory is updated
     :brev: a name, which might be a bookmark to be activated after updating
     :clean: whether changes in the working directory can be discarded
     :updatecheck: how to deal with a dirty working directory

     Valid values for updatecheck are (None => linear):

      * abort: abort if the working directory is dirty
      * none: don't check (merge working directory changes into destination)
      * linear: check that update is linear before merging working directory
            changes into destination
      * noconflict: check that the update does not result in file merges

     This returns whether conflict is detected at updating or not.
     """
     if updatecheck is None:
         updatecheck = ui.config('commands', 'update.check')
         if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
             # If not configured, or invalid value configured
             updatecheck = 'linear'
     with repo.wlock():
         movemarkfrom = None
         warndest = False
         if checkout is None:
             updata = destutil.destupdate(repo, clean=clean)
             checkout, movemarkfrom, brev = updata
             warndest = True

         if clean:
             ret = _clean(repo, checkout)
         else:
             if updatecheck == 'abort':
                 cmdutil.bailifchanged(repo, merge=False)
                 updatecheck = 'none'
             ret = _update(repo, checkout, updatecheck=updatecheck)

         if not ret and movemarkfrom:
             if movemarkfrom == repo['.'].node():
                 pass # no-op update
             elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                 b = ui.label(repo._activebookmark, 'bookmarks.active')
                 ui.status(_("updating bookmark %s\n") % b)
             else:
                 # this can happen with a non-linear update
                 b = ui.label(repo._activebookmark, 'bookmarks')
                 ui.status(_("(leaving bookmark %s)\n") % b)
                 bookmarks.deactivate(repo)
         elif brev in repo._bookmarks:
             if brev != repo._activebookmark:
                 b = ui.label(brev, 'bookmarks.active')
                 ui.status(_("(activating bookmark %s)\n") % b)
             bookmarks.activate(repo, brev)
         elif brev:
             if repo._activebookmark:
                 b = ui.label(repo._activebookmark, 'bookmarks')
                 ui.status(_("(leaving bookmark %s)\n") % b)
             bookmarks.deactivate(repo)

         if warndest:
             destutil.statusotherdests(ui, repo)

         return ret

 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                             labels=labels)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                          "or 'hg update -C .' to abandon\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0

 def _incoming(displaychlist, subreporecurse, ui, repo, source,
               opts, buffered=False):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
     source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
     other = peer(repo, opts, source)
     ui.status(_('comparing with %s\n') % util.hidepassword(source))
     revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

     if revs:
         revs = [other.lookup(rev) for rev in revs]
     other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                 revs, opts["bundle"], opts["force"])
     try:
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
         ui.pager('incoming')
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
         displaychlist(other, chlist, displayer)
         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
     return 0 # exit code is zero since we found incoming changes

 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.incoming(ui, source, opts))
         return ret

     def display(other, chlist, displayer):
         limit = cmdutil.loglimit(opts)
         if opts.get('newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
             if opts.get('no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
     return _incoming(display, subreporecurse, ui, repo, source, opts)

 def _outgoing(ui, repo, dest, opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = parseurl(dest, opts.get('branch'))
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
     if revs:
         revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

     other = peer(repo, opts, dest)
     outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                             force=opts.get('force'))
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
     return o, other

 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.outgoing(ui, dest, opts))
         return ret

     limit = cmdutil.loglimit(opts)
     o, other = _outgoing(ui, repo, dest, opts)
     if not o:
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return recurse()

     if opts.get('newest_first'):
         o.reverse()
     ui.pager('outgoing')
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
956 if limit is not None and count >= limit:
953 break
957 break
954 parents = [p for p in repo.changelog.parents(n) if p != nullid]
958 parents = [p for p in repo.changelog.parents(n) if p != nullid]
955 if opts.get('no_merges') and len(parents) == 2:
959 if opts.get('no_merges') and len(parents) == 2:
956 continue
960 continue
957 count += 1
961 count += 1
958 displayer.show(repo[n])
962 displayer.show(repo[n])
959 displayer.close()
963 displayer.close()
960 cmdutil.outgoinghooks(ui, repo, other, opts, o)
964 cmdutil.outgoinghooks(ui, repo, other, opts, o)
961 recurse()
965 recurse()
962 return 0 # exit code is zero since we found outgoing changes
966 return 0 # exit code is zero since we found outgoing changes
963
967
964 def verify(repo):
968 def verify(repo):
965 """verify the consistency of a repository"""
969 """verify the consistency of a repository"""
966 ret = verifymod.verify(repo)
970 ret = verifymod.verify(repo)
967
971
968 # Broken subrepo references in hidden csets don't seem worth worrying about,
972 # Broken subrepo references in hidden csets don't seem worth worrying about,
969 # since they can't be pushed/pulled, and --hidden can be used if they are a
973 # since they can't be pushed/pulled, and --hidden can be used if they are a
970 # concern.
974 # concern.
971
975
972 # pathto() is needed for -R case
976 # pathto() is needed for -R case
973 revs = repo.revs("filelog(%s)",
977 revs = repo.revs("filelog(%s)",
974 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
978 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
975
979
976 if revs:
980 if revs:
977 repo.ui.status(_('checking subrepo links\n'))
981 repo.ui.status(_('checking subrepo links\n'))
978 for rev in revs:
982 for rev in revs:
979 ctx = repo[rev]
983 ctx = repo[rev]
980 try:
984 try:
981 for subpath in ctx.substate:
985 for subpath in ctx.substate:
982 try:
986 try:
983 ret = (ctx.sub(subpath, allowcreate=False).verify()
987 ret = (ctx.sub(subpath, allowcreate=False).verify()
984 or ret)
988 or ret)
985 except error.RepoError as e:
989 except error.RepoError as e:
986 repo.ui.warn(('%s: %s\n') % (rev, e))
990 repo.ui.warn(('%s: %s\n') % (rev, e))
987 except Exception:
991 except Exception:
988 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
992 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
989 node.short(ctx.node()))
993 node.short(ctx.node()))
990
994
991 return ret
995 return ret
992
996
993 def remoteui(src, opts):
997 def remoteui(src, opts):
994 'build a remote ui from ui or repo and opts'
998 'build a remote ui from ui or repo and opts'
995 if util.safehasattr(src, 'baseui'): # looks like a repository
999 if util.safehasattr(src, 'baseui'): # looks like a repository
996 dst = src.baseui.copy() # drop repo-specific config
1000 dst = src.baseui.copy() # drop repo-specific config
997 src = src.ui # copy target options from repo
1001 src = src.ui # copy target options from repo
998 else: # assume it's a global ui object
1002 else: # assume it's a global ui object
999 dst = src.copy() # keep all global options
1003 dst = src.copy() # keep all global options
1000
1004
1001 # copy ssh-specific options
1005 # copy ssh-specific options
1002 for o in 'ssh', 'remotecmd':
1006 for o in 'ssh', 'remotecmd':
1003 v = opts.get(o) or src.config('ui', o)
1007 v = opts.get(o) or src.config('ui', o)
1004 if v:
1008 if v:
1005 dst.setconfig("ui", o, v, 'copied')
1009 dst.setconfig("ui", o, v, 'copied')
1006
1010
1007 # copy bundle-specific options
1011 # copy bundle-specific options
1008 r = src.config('bundle', 'mainreporoot')
1012 r = src.config('bundle', 'mainreporoot')
1009 if r:
1013 if r:
1010 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1014 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1011
1015
1012 # copy selected local settings to the remote ui
1016 # copy selected local settings to the remote ui
1013 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1017 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1014 for key, val in src.configitems(sect):
1018 for key, val in src.configitems(sect):
1015 dst.setconfig(sect, key, val, 'copied')
1019 dst.setconfig(sect, key, val, 'copied')
1016 v = src.config('web', 'cacerts')
1020 v = src.config('web', 'cacerts')
1017 if v:
1021 if v:
1018 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1022 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1019
1023
1020 return dst
1024 return dst
1021
1025
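The settings copied above are the complete whitelist that follows a user's local configuration onto the wire: ui.ssh and ui.remotecmd, bundle.mainreporoot, the auth, hostfingerprints, hostsecurity and http_proxy sections, and web.cacerts. A hedged sketch of the behaviour when starting from a repository (the path and ssh command are illustrative placeholders):

from mercurial import hg, ui as uimod

# Sketch only: build the ui a peer connection would use.  Starting from a
# repository drops repo-local configuration (remoteui() copies repo.baseui),
# then re-applies the whitelisted settings; explicit opts win over config.
u = uimod.ui.load()
repo = hg.repository(u, '/path/to/repo')        # placeholder path
remui = hg.remoteui(repo, {'ssh': 'ssh -C'})    # like passing --ssh 'ssh -C'
assert remui.config('ui', 'ssh') == 'ssh -C'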
1022 # Files of interest
1026 # Files of interest
1023 # Used to check if the repository has changed looking at mtime and size of
1027 # Used to check if the repository has changed looking at mtime and size of
1024 # these files.
1028 # these files.
1025 foi = [('spath', '00changelog.i'),
1029 foi = [('spath', '00changelog.i'),
1026 ('spath', 'phaseroots'), # ! phase can change content at the same size
1030 ('spath', 'phaseroots'), # ! phase can change content at the same size
1027 ('spath', 'obsstore'),
1031 ('spath', 'obsstore'),
1028 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1032 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1029 ]
1033 ]
1030
1034
1031 class cachedlocalrepo(object):
1035 class cachedlocalrepo(object):
1032 """Holds a localrepository that can be cached and reused."""
1036 """Holds a localrepository that can be cached and reused."""
1033
1037
1034 def __init__(self, repo):
1038 def __init__(self, repo):
1035 """Create a new cached repo from an existing repo.
1039 """Create a new cached repo from an existing repo.
1036
1040
1037 We assume the passed in repo was recently created. If the
1041 We assume the passed in repo was recently created. If the
1038 repo has changed between when it was created and when it was
1042 repo has changed between when it was created and when it was
1039 turned into a cache, it may not refresh properly.
1043 turned into a cache, it may not refresh properly.
1040 """
1044 """
1041 assert isinstance(repo, localrepo.localrepository)
1045 assert isinstance(repo, localrepo.localrepository)
1042 self._repo = repo
1046 self._repo = repo
1043 self._state, self.mtime = self._repostate()
1047 self._state, self.mtime = self._repostate()
1044 self._filtername = repo.filtername
1048 self._filtername = repo.filtername
1045
1049
1046 def fetch(self):
1050 def fetch(self):
1047 """Refresh (if necessary) and return a repository.
1051 """Refresh (if necessary) and return a repository.
1048
1052
1049 If the cached instance is out of date, it will be recreated
1053 If the cached instance is out of date, it will be recreated
1050 automatically and returned.
1054 automatically and returned.
1051
1055
1052 Returns a tuple of the repo and a boolean indicating whether a new
1056 Returns a tuple of the repo and a boolean indicating whether a new
1053 repo instance was created.
1057 repo instance was created.
1054 """
1058 """
1055 # We compare the mtimes and sizes of some well-known files to
1059 # We compare the mtimes and sizes of some well-known files to
1056 # determine if the repo changed. This is not precise, as mtimes
1060 # determine if the repo changed. This is not precise, as mtimes
1057 # are susceptible to clock skew and imprecise filesystems and
1061 # are susceptible to clock skew and imprecise filesystems and
1058 # file content can change while maintaining the same size.
1062 # file content can change while maintaining the same size.
1059
1063
1060 state, mtime = self._repostate()
1064 state, mtime = self._repostate()
1061 if state == self._state:
1065 if state == self._state:
1062 return self._repo, False
1066 return self._repo, False
1063
1067
1064 repo = repository(self._repo.baseui, self._repo.url())
1068 repo = repository(self._repo.baseui, self._repo.url())
1065 if self._filtername:
1069 if self._filtername:
1066 self._repo = repo.filtered(self._filtername)
1070 self._repo = repo.filtered(self._filtername)
1067 else:
1071 else:
1068 self._repo = repo.unfiltered()
1072 self._repo = repo.unfiltered()
1069 self._state = state
1073 self._state = state
1070 self.mtime = mtime
1074 self.mtime = mtime
1071
1075
1072 return self._repo, True
1076 return self._repo, True
1073
1077
1074 def _repostate(self):
1078 def _repostate(self):
1075 state = []
1079 state = []
1076 maxmtime = -1
1080 maxmtime = -1
1077 for attr, fname in foi:
1081 for attr, fname in foi:
1078 prefix = getattr(self._repo, attr)
1082 prefix = getattr(self._repo, attr)
1079 p = os.path.join(prefix, fname)
1083 p = os.path.join(prefix, fname)
1080 try:
1084 try:
1081 st = os.stat(p)
1085 st = os.stat(p)
1082 except OSError:
1086 except OSError:
1083 st = os.stat(prefix)
1087 st = os.stat(prefix)
1084 state.append((st.st_mtime, st.st_size))
1088 state.append((st.st_mtime, st.st_size))
1085 maxmtime = max(maxmtime, st.st_mtime)
1089 maxmtime = max(maxmtime, st.st_mtime)
1086
1090
1087 return tuple(state), maxmtime
1091 return tuple(state), maxmtime
1088
1092
1089 def copy(self):
1093 def copy(self):
1090 """Obtain a copy of this class instance.
1094 """Obtain a copy of this class instance.
1091
1095
1092 A new localrepository instance is obtained. The new instance should be
1096 A new localrepository instance is obtained. The new instance should be
1093 completely independent of the original.
1097 completely independent of the original.
1094 """
1098 """
1095 repo = repository(self._repo.baseui, self._repo.origroot)
1099 repo = repository(self._repo.baseui, self._repo.origroot)
1096 if self._filtername:
1100 if self._filtername:
1097 repo = repo.filtered(self._filtername)
1101 repo = repo.filtered(self._filtername)
1098 else:
1102 else:
1099 repo = repo.unfiltered()
1103 repo = repo.unfiltered()
1100 c = cachedlocalrepo(repo)
1104 c = cachedlocalrepo(repo)
1101 c._state = self._state
1105 c._state = self._state
1102 c.mtime = self.mtime
1106 c.mtime = self.mtime
1103 return c
1107 return c
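Taken together, foi and cachedlocalrepo let a long-running process (hgweb is an in-tree consumer) keep a repository object warm and only pay for reopening it when the mtime or size of the changelog, phaseroots, obsstore or bookmarks files moves. A hedged usage sketch; the path and request handler are illustrative placeholders:

from mercurial import hg, ui as uimod

# Sketch only: reuse a repository across requests, reopening it only when
# one of the files of interest changed according to _repostate().
u = uimod.ui.load()
cache = hg.cachedlocalrepo(hg.repository(u, '/srv/repos/project'))  # placeholder path

def handlerequest():
    repo, created = cache.fetch()      # created is True if it was reopened
    if created:
        repo.ui.debug('repository changed on disk; reopened\n')
    return len(repo)                   # e.g. report the number of revisions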
@@ -1,119 +1,108
1 Testing the functionality to pull remotenames
1 Testing the functionality to pull remotenames
2 =============================================
2 =============================================
3
3
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [alias]
5 > [alias]
6 > glog = log -G -T '{rev}:{node|short} {desc}'
6 > glog = log -G -T '{rev}:{node|short} {desc}'
7 > [experimental]
8 > remotenames = True
7 > EOF
9 > EOF
8
10
9 Making a server repo
11 Making a server repo
10 --------------------
12 --------------------
11
13
12 $ hg init server
14 $ hg init server
13 $ cd server
15 $ cd server
14 $ for ch in a b c d e f g h; do
16 $ for ch in a b c d e f g h; do
15 > echo "foo" >> $ch
17 > echo "foo" >> $ch
16 > hg ci -Aqm "Added "$ch
18 > hg ci -Aqm "Added "$ch
17 > done
19 > done
18 $ hg glog
20 $ hg glog
19 @ 7:ec2426147f0e Added h
21 @ 7:ec2426147f0e Added h
20 |
22 |
21 o 6:87d6d6676308 Added g
23 o 6:87d6d6676308 Added g
22 |
24 |
23 o 5:825660c69f0c Added f
25 o 5:825660c69f0c Added f
24 |
26 |
25 o 4:aa98ab95a928 Added e
27 o 4:aa98ab95a928 Added e
26 |
28 |
27 o 3:62615734edd5 Added d
29 o 3:62615734edd5 Added d
28 |
30 |
29 o 2:28ad74487de9 Added c
31 o 2:28ad74487de9 Added c
30 |
32 |
31 o 1:29becc82797a Added b
33 o 1:29becc82797a Added b
32 |
34 |
33 o 0:18d04c59bb5d Added a
35 o 0:18d04c59bb5d Added a
34
36
35 $ hg bookmark -r 3 foo
37 $ hg bookmark -r 3 foo
36 $ hg bookmark -r 6 bar
38 $ hg bookmark -r 6 bar
37 $ hg up 4
39 $ hg up 4
38 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
40 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
39 $ hg branch wat
41 $ hg branch wat
40 marked working directory as branch wat
42 marked working directory as branch wat
41 (branches are permanent and global, did you want a bookmark?)
43 (branches are permanent and global, did you want a bookmark?)
42 $ echo foo >> bar
44 $ echo foo >> bar
43 $ hg ci -Aqm "added bar"
45 $ hg ci -Aqm "added bar"
44
46
45 Making a client repo
47 Making a client repo
46 --------------------
48 --------------------
47
49
48 $ cd ..
50 $ cd ..
49 $ hg init client
50 $ cd client
51 $ cat >> .hg/hgrc << EOF
52 > [experimental]
53 > remotenames = True
54 > EOF
55
51
56 $ hg pull ../server/
52 $ hg clone server client
57 pulling from ../server/
53 updating to branch default
58 requesting all changes
54 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 adding changesets
60 adding manifests
61 adding file changes
62 added 9 changesets with 9 changes to 9 files (+1 heads)
63 adding remote bookmark bar
64 adding remote bookmark foo
65 new changesets 18d04c59bb5d:3e1487808078
66 (run 'hg heads' to see heads)
67
55
56 $ cd client
68 $ cat .hg/remotenames/bookmarks
57 $ cat .hg/remotenames/bookmarks
69 0
58 0
70
59
71 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
60 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
72 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
61 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
73
62
74 $ cat .hg/remotenames/branches
63 $ cat .hg/remotenames/branches
75 0
64 0
76
65
77 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
66 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
78 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
67 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
79
68
80 Making a new server
69 Making a new server
81 -------------------
70 -------------------
82
71
83 $ cd ..
72 $ cd ..
84 $ hg init server2
73 $ hg init server2
85 $ cd server2
74 $ cd server2
86 $ hg pull ../server/
75 $ hg pull ../server/
87 pulling from ../server/
76 pulling from ../server/
88 requesting all changes
77 requesting all changes
89 adding changesets
78 adding changesets
90 adding manifests
79 adding manifests
91 adding file changes
80 adding file changes
92 added 9 changesets with 9 changes to 9 files (+1 heads)
81 added 9 changesets with 9 changes to 9 files (+1 heads)
93 adding remote bookmark bar
82 adding remote bookmark bar
94 adding remote bookmark foo
83 adding remote bookmark foo
95 new changesets 18d04c59bb5d:3e1487808078
84 new changesets 18d04c59bb5d:3e1487808078
96 (run 'hg heads' to see heads)
85 (run 'hg heads' to see heads)
97
86
98 Pulling from the new server
87 Pulling from the new server
99 ---------------------------
88 ---------------------------
100 $ cd ../client/
89 $ cd ../client/
101 $ hg pull ../server2/
90 $ hg pull ../server2/
102 pulling from ../server2/
91 pulling from ../server2/
103 searching for changes
92 searching for changes
104 no changes found
93 no changes found
105 $ cat .hg/remotenames/bookmarks
94 $ cat .hg/remotenames/bookmarks
106 0
95 0
107
96
108 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
97 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
109 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
98 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
110 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
99 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
111 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)
100 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)
112
101
113 $ cat .hg/remotenames/branches
102 $ cat .hg/remotenames/branches
114 0
103 0
115
104
116 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
105 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
117 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
106 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
118 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
107 ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
119 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)
108 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)
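The files read back above also document the on-disk layout used for remote names: .hg/remotenames/bookmarks and .hg/remotenames/branches each start with a version line ("0"), a blank separator line, and then one record per line of the form <hex node>\x00<remote path>\x00<name>. A minimal parsing sketch based only on that observed layout (the helper name is illustrative, not part of Mercurial's API):

# Sketch only: read one of the .hg/remotenames/* files shown above.
#   line 1: version number ("0")
#   line 2: blank separator
#   rest:   "<hex node>\0<remote path>\0<name>" per remote name
def readremotenames(path):
    entries = []
    with open(path, 'rb') as fp:
        version = int(fp.readline().strip())
        if version != 0:
            raise ValueError('unsupported remotenames file version %d'
                             % version)
        fp.readline()                      # skip the blank separator
        for line in fp:
            line = line.strip()
            if not line:
                continue
            node, remote, name = line.split(b'\0')
            entries.append((node, remote, name))
    return entries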