hg: ensure the progress bar is completed when copying the store...
Matt Harbison
r39425:ddfd8002 default
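The change below rewrites copystore() so that the progress bar it creates is always completed, even when util.copyfiles() raises partway through copying the store: instead of calling progress.complete() explicitly at the end (which is skipped once an exception escapes the loop), the new code holds the progress object in a with block. A minimal, self-contained sketch of that pattern, using a hypothetical makeprogress() stand-in rather than Mercurial's ui.makeprogress(), and assuming only what the diff itself relies on, namely that leaving the with block completes the bar:

    import contextlib

    @contextlib.contextmanager
    def makeprogress(topic):
        # hypothetical stand-in for ui.makeprogress(); the point is that
        # complete() runs in a finally block, so it fires on every exit
        # path, including when the body of the with statement raises
        class progress(object):
            def increment(self):
                pass
            def complete(self):
                print('%s: done' % topic)
        p = progress()
        try:
            yield p
        finally:
            p.complete()

    with makeprogress('copying') as progress:
        for f in ['00changelog.i', '00manifest.i', 'data']:
            progress.increment()

The diff makes the same trade inside copystore(): the explicit progress.complete() call at old line 408 disappears because the with statement introduced at line 384 now owns that responsibility.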
@@ -1,1177 +1,1176 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import functools
12 import functools
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16 import stat
16 import stat
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 nullid,
20 nullid,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 bookmarks,
24 bookmarks,
25 bundlerepo,
25 bundlerepo,
26 cacheutil,
26 cacheutil,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 httppeer,
33 httppeer,
34 localrepo,
34 localrepo,
35 lock,
35 lock,
36 logcmdutil,
36 logcmdutil,
37 logexchange,
37 logexchange,
38 merge as mergemod,
38 merge as mergemod,
39 node,
39 node,
40 phases,
40 phases,
41 scmutil,
41 scmutil,
42 sshpeer,
42 sshpeer,
43 statichttprepo,
43 statichttprepo,
44 ui as uimod,
44 ui as uimod,
45 unionrepo,
45 unionrepo,
46 url,
46 url,
47 util,
47 util,
48 verify as verifymod,
48 verify as verifymod,
49 vfs as vfsmod,
49 vfs as vfsmod,
50 )
50 )
51
51
52 from .utils import (
52 from .utils import (
53 stringutil,
53 stringutil,
54 )
54 )
55
55
56 release = lock.release
56 release = lock.release
57
57
58 # shared features
58 # shared features
59 sharedbookmarks = 'bookmarks'
59 sharedbookmarks = 'bookmarks'
60
60
61 def _local(path):
61 def _local(path):
62 path = util.expandpath(util.urllocalpath(path))
62 path = util.expandpath(util.urllocalpath(path))
63 return (os.path.isfile(path) and bundlerepo or localrepo)
63 return (os.path.isfile(path) and bundlerepo or localrepo)
64
64
65 def addbranchrevs(lrepo, other, branches, revs):
65 def addbranchrevs(lrepo, other, branches, revs):
66 peer = other.peer() # a courtesy to callers using a localrepo for other
66 peer = other.peer() # a courtesy to callers using a localrepo for other
67 hashbranch, branches = branches
67 hashbranch, branches = branches
68 if not hashbranch and not branches:
68 if not hashbranch and not branches:
69 x = revs or None
69 x = revs or None
70 if revs:
70 if revs:
71 y = revs[0]
71 y = revs[0]
72 else:
72 else:
73 y = None
73 y = None
74 return x, y
74 return x, y
75 if revs:
75 if revs:
76 revs = list(revs)
76 revs = list(revs)
77 else:
77 else:
78 revs = []
78 revs = []
79
79
80 if not peer.capable('branchmap'):
80 if not peer.capable('branchmap'):
81 if branches:
81 if branches:
82 raise error.Abort(_("remote branch lookup not supported"))
82 raise error.Abort(_("remote branch lookup not supported"))
83 revs.append(hashbranch)
83 revs.append(hashbranch)
84 return revs, revs[0]
84 return revs, revs[0]
85
85
86 with peer.commandexecutor() as e:
86 with peer.commandexecutor() as e:
87 branchmap = e.callcommand('branchmap', {}).result()
87 branchmap = e.callcommand('branchmap', {}).result()
88
88
89 def primary(branch):
89 def primary(branch):
90 if branch == '.':
90 if branch == '.':
91 if not lrepo:
91 if not lrepo:
92 raise error.Abort(_("dirstate branch not accessible"))
92 raise error.Abort(_("dirstate branch not accessible"))
93 branch = lrepo.dirstate.branch()
93 branch = lrepo.dirstate.branch()
94 if branch in branchmap:
94 if branch in branchmap:
95 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
95 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
96 return True
96 return True
97 else:
97 else:
98 return False
98 return False
99
99
100 for branch in branches:
100 for branch in branches:
101 if not primary(branch):
101 if not primary(branch):
102 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
102 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
103 if hashbranch:
103 if hashbranch:
104 if not primary(hashbranch):
104 if not primary(hashbranch):
105 revs.append(hashbranch)
105 revs.append(hashbranch)
106 return revs, revs[0]
106 return revs, revs[0]
107
107
108 def parseurl(path, branches=None):
108 def parseurl(path, branches=None):
109 '''parse url#branch, returning (url, (branch, branches))'''
109 '''parse url#branch, returning (url, (branch, branches))'''
110
110
111 u = util.url(path)
111 u = util.url(path)
112 branch = None
112 branch = None
113 if u.fragment:
113 if u.fragment:
114 branch = u.fragment
114 branch = u.fragment
115 u.fragment = None
115 u.fragment = None
116 return bytes(u), (branch, branches or [])
116 return bytes(u), (branch, branches or [])
117
117
118 schemes = {
118 schemes = {
119 'bundle': bundlerepo,
119 'bundle': bundlerepo,
120 'union': unionrepo,
120 'union': unionrepo,
121 'file': _local,
121 'file': _local,
122 'http': httppeer,
122 'http': httppeer,
123 'https': httppeer,
123 'https': httppeer,
124 'ssh': sshpeer,
124 'ssh': sshpeer,
125 'static-http': statichttprepo,
125 'static-http': statichttprepo,
126 }
126 }
127
127
128 def _peerlookup(path):
128 def _peerlookup(path):
129 u = util.url(path)
129 u = util.url(path)
130 scheme = u.scheme or 'file'
130 scheme = u.scheme or 'file'
131 thing = schemes.get(scheme) or schemes['file']
131 thing = schemes.get(scheme) or schemes['file']
132 try:
132 try:
133 return thing(path)
133 return thing(path)
134 except TypeError:
134 except TypeError:
135 # we can't test callable(thing) because 'thing' can be an unloaded
135 # we can't test callable(thing) because 'thing' can be an unloaded
136 # module that implements __call__
136 # module that implements __call__
137 if not util.safehasattr(thing, 'instance'):
137 if not util.safehasattr(thing, 'instance'):
138 raise
138 raise
139 return thing
139 return thing
140
140
141 def islocal(repo):
141 def islocal(repo):
142 '''return true if repo (or path pointing to repo) is local'''
142 '''return true if repo (or path pointing to repo) is local'''
143 if isinstance(repo, bytes):
143 if isinstance(repo, bytes):
144 try:
144 try:
145 return _peerlookup(repo).islocal(repo)
145 return _peerlookup(repo).islocal(repo)
146 except AttributeError:
146 except AttributeError:
147 return False
147 return False
148 return repo.local()
148 return repo.local()
149
149
150 def openpath(ui, path):
150 def openpath(ui, path):
151 '''open path with open if local, url.open if remote'''
151 '''open path with open if local, url.open if remote'''
152 pathurl = util.url(path, parsequery=False, parsefragment=False)
152 pathurl = util.url(path, parsequery=False, parsefragment=False)
153 if pathurl.islocal():
153 if pathurl.islocal():
154 return util.posixfile(pathurl.localpath(), 'rb')
154 return util.posixfile(pathurl.localpath(), 'rb')
155 else:
155 else:
156 return url.open(ui, path)
156 return url.open(ui, path)
157
157
158 # a list of (ui, repo) functions called for wire peer initialization
158 # a list of (ui, repo) functions called for wire peer initialization
159 wirepeersetupfuncs = []
159 wirepeersetupfuncs = []
160
160
161 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
161 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
162 intents=None):
162 intents=None):
163 """return a repository object for the specified path"""
163 """return a repository object for the specified path"""
164 obj = _peerlookup(path).instance(ui, path, create, intents=intents)
164 obj = _peerlookup(path).instance(ui, path, create, intents=intents)
165 ui = getattr(obj, "ui", ui)
165 ui = getattr(obj, "ui", ui)
166 if ui.configbool('devel', 'debug.extensions'):
166 if ui.configbool('devel', 'debug.extensions'):
167 log = functools.partial(
167 log = functools.partial(
168 ui.debug, 'debug.extensions: ', label='debug.extensions')
168 ui.debug, 'debug.extensions: ', label='debug.extensions')
169 else:
169 else:
170 log = lambda *a, **kw: None
170 log = lambda *a, **kw: None
171 for f in presetupfuncs or []:
171 for f in presetupfuncs or []:
172 f(ui, obj)
172 f(ui, obj)
173 log('- executing reposetup hooks\n')
173 log('- executing reposetup hooks\n')
174 for name, module in extensions.extensions(ui):
174 for name, module in extensions.extensions(ui):
175 log(' - running reposetup for %s\n' % (name,))
175 log(' - running reposetup for %s\n' % (name,))
176 hook = getattr(module, 'reposetup', None)
176 hook = getattr(module, 'reposetup', None)
177 if hook:
177 if hook:
178 hook(ui, obj)
178 hook(ui, obj)
179 if not obj.local():
179 if not obj.local():
180 for f in wirepeersetupfuncs:
180 for f in wirepeersetupfuncs:
181 f(ui, obj)
181 f(ui, obj)
182 return obj
182 return obj
183
183
184 def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
184 def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
185 """return a repository object for the specified path"""
185 """return a repository object for the specified path"""
186 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
186 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
187 intents=intents)
187 intents=intents)
188 repo = peer.local()
188 repo = peer.local()
189 if not repo:
189 if not repo:
190 raise error.Abort(_("repository '%s' is not local") %
190 raise error.Abort(_("repository '%s' is not local") %
191 (path or peer.url()))
191 (path or peer.url()))
192 return repo.filtered('visible')
192 return repo.filtered('visible')
193
193
194 def peer(uiorrepo, opts, path, create=False, intents=None):
194 def peer(uiorrepo, opts, path, create=False, intents=None):
195 '''return a repository peer for the specified path'''
195 '''return a repository peer for the specified path'''
196 rui = remoteui(uiorrepo, opts)
196 rui = remoteui(uiorrepo, opts)
197 return _peerorrepo(rui, path, create, intents=intents).peer()
197 return _peerorrepo(rui, path, create, intents=intents).peer()
198
198
199 def defaultdest(source):
199 def defaultdest(source):
200 '''return default destination of clone if none is given
200 '''return default destination of clone if none is given
201
201
202 >>> defaultdest(b'foo')
202 >>> defaultdest(b'foo')
203 'foo'
203 'foo'
204 >>> defaultdest(b'/foo/bar')
204 >>> defaultdest(b'/foo/bar')
205 'bar'
205 'bar'
206 >>> defaultdest(b'/')
206 >>> defaultdest(b'/')
207 ''
207 ''
208 >>> defaultdest(b'')
208 >>> defaultdest(b'')
209 ''
209 ''
210 >>> defaultdest(b'http://example.org/')
210 >>> defaultdest(b'http://example.org/')
211 ''
211 ''
212 >>> defaultdest(b'http://example.org/foo/')
212 >>> defaultdest(b'http://example.org/foo/')
213 'foo'
213 'foo'
214 '''
214 '''
215 path = util.url(source).path
215 path = util.url(source).path
216 if not path:
216 if not path:
217 return ''
217 return ''
218 return os.path.basename(os.path.normpath(path))
218 return os.path.basename(os.path.normpath(path))
219
219
220 def sharedreposource(repo):
220 def sharedreposource(repo):
221 """Returns repository object for source repository of a shared repo.
221 """Returns repository object for source repository of a shared repo.
222
222
223 If repo is not a shared repository, returns None.
223 If repo is not a shared repository, returns None.
224 """
224 """
225 if repo.sharedpath == repo.path:
225 if repo.sharedpath == repo.path:
226 return None
226 return None
227
227
228 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
228 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
229 return repo.srcrepo
229 return repo.srcrepo
230
230
231 # the sharedpath always ends in the .hg; we want the path to the repo
231 # the sharedpath always ends in the .hg; we want the path to the repo
232 source = repo.vfs.split(repo.sharedpath)[0]
232 source = repo.vfs.split(repo.sharedpath)[0]
233 srcurl, branches = parseurl(source)
233 srcurl, branches = parseurl(source)
234 srcrepo = repository(repo.ui, srcurl)
234 srcrepo = repository(repo.ui, srcurl)
235 repo.srcrepo = srcrepo
235 repo.srcrepo = srcrepo
236 return srcrepo
236 return srcrepo
237
237
238 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
238 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
239 relative=False):
239 relative=False):
240 '''create a shared repository'''
240 '''create a shared repository'''
241
241
242 if not islocal(source):
242 if not islocal(source):
243 raise error.Abort(_('can only share local repositories'))
243 raise error.Abort(_('can only share local repositories'))
244
244
245 if not dest:
245 if not dest:
246 dest = defaultdest(source)
246 dest = defaultdest(source)
247 else:
247 else:
248 dest = ui.expandpath(dest)
248 dest = ui.expandpath(dest)
249
249
250 if isinstance(source, bytes):
250 if isinstance(source, bytes):
251 origsource = ui.expandpath(source)
251 origsource = ui.expandpath(source)
252 source, branches = parseurl(origsource)
252 source, branches = parseurl(origsource)
253 srcrepo = repository(ui, source)
253 srcrepo = repository(ui, source)
254 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
254 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
255 else:
255 else:
256 srcrepo = source.local()
256 srcrepo = source.local()
257 origsource = source = srcrepo.url()
257 origsource = source = srcrepo.url()
258 checkout = None
258 checkout = None
259
259
260 sharedpath = srcrepo.sharedpath # if our source is already sharing
260 sharedpath = srcrepo.sharedpath # if our source is already sharing
261
261
262 destwvfs = vfsmod.vfs(dest, realpath=True)
262 destwvfs = vfsmod.vfs(dest, realpath=True)
263 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
263 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
264
264
265 if destvfs.lexists():
265 if destvfs.lexists():
266 raise error.Abort(_('destination already exists'))
266 raise error.Abort(_('destination already exists'))
267
267
268 if not destwvfs.isdir():
268 if not destwvfs.isdir():
269 destwvfs.makedirs()
269 destwvfs.makedirs()
270 destvfs.makedir()
270 destvfs.makedir()
271
271
272 requirements = ''
272 requirements = ''
273 try:
273 try:
274 requirements = srcrepo.vfs.read('requires')
274 requirements = srcrepo.vfs.read('requires')
275 except IOError as inst:
275 except IOError as inst:
276 if inst.errno != errno.ENOENT:
276 if inst.errno != errno.ENOENT:
277 raise
277 raise
278
278
279 if relative:
279 if relative:
280 try:
280 try:
281 sharedpath = os.path.relpath(sharedpath, destvfs.base)
281 sharedpath = os.path.relpath(sharedpath, destvfs.base)
282 requirements += 'relshared\n'
282 requirements += 'relshared\n'
283 except (IOError, ValueError) as e:
283 except (IOError, ValueError) as e:
284 # ValueError is raised on Windows if the drive letters differ on
284 # ValueError is raised on Windows if the drive letters differ on
285 # each path
285 # each path
286 raise error.Abort(_('cannot calculate relative path'),
286 raise error.Abort(_('cannot calculate relative path'),
287 hint=stringutil.forcebytestr(e))
287 hint=stringutil.forcebytestr(e))
288 else:
288 else:
289 requirements += 'shared\n'
289 requirements += 'shared\n'
290
290
291 destvfs.write('requires', requirements)
291 destvfs.write('requires', requirements)
292 destvfs.write('sharedpath', sharedpath)
292 destvfs.write('sharedpath', sharedpath)
293
293
294 r = repository(ui, destwvfs.base)
294 r = repository(ui, destwvfs.base)
295 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
295 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
296 _postshareupdate(r, update, checkout=checkout)
296 _postshareupdate(r, update, checkout=checkout)
297 return r
297 return r
298
298
299 def unshare(ui, repo):
299 def unshare(ui, repo):
300 """convert a shared repository to a normal one
300 """convert a shared repository to a normal one
301
301
302 Copy the store data to the repo and remove the sharedpath data.
302 Copy the store data to the repo and remove the sharedpath data.
303 """
303 """
304
304
305 destlock = lock = None
305 destlock = lock = None
306 lock = repo.lock()
306 lock = repo.lock()
307 try:
307 try:
308 # we use locks here because if we race with commit, we
308 # we use locks here because if we race with commit, we
309 # can end up with extra data in the cloned revlogs that's
309 # can end up with extra data in the cloned revlogs that's
310 # not pointed to by changesets, thus causing verify to
310 # not pointed to by changesets, thus causing verify to
311 # fail
311 # fail
312
312
313 destlock = copystore(ui, repo, repo.path)
313 destlock = copystore(ui, repo, repo.path)
314
314
315 sharefile = repo.vfs.join('sharedpath')
315 sharefile = repo.vfs.join('sharedpath')
316 util.rename(sharefile, sharefile + '.old')
316 util.rename(sharefile, sharefile + '.old')
317
317
318 repo.requirements.discard('shared')
318 repo.requirements.discard('shared')
319 repo.requirements.discard('relshared')
319 repo.requirements.discard('relshared')
320 repo._writerequirements()
320 repo._writerequirements()
321 finally:
321 finally:
322 destlock and destlock.release()
322 destlock and destlock.release()
323 lock and lock.release()
323 lock and lock.release()
324
324
325 # update store, spath, svfs and sjoin of repo
325 # update store, spath, svfs and sjoin of repo
326 repo.unfiltered().__init__(repo.baseui, repo.root)
326 repo.unfiltered().__init__(repo.baseui, repo.root)
327
327
328 # TODO: figure out how to access subrepos that exist, but were previously
328 # TODO: figure out how to access subrepos that exist, but were previously
329 # removed from .hgsub
329 # removed from .hgsub
330 c = repo['.']
330 c = repo['.']
331 subs = c.substate
331 subs = c.substate
332 for s in sorted(subs):
332 for s in sorted(subs):
333 c.sub(s).unshare()
333 c.sub(s).unshare()
334
334
335 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
335 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
336 """Called after a new shared repo is created.
336 """Called after a new shared repo is created.
337
337
338 The new repo only has a requirements file and pointer to the source.
338 The new repo only has a requirements file and pointer to the source.
339 This function configures additional shared data.
339 This function configures additional shared data.
340
340
341 Extensions can wrap this function and write additional entries to
341 Extensions can wrap this function and write additional entries to
342 destrepo/.hg/shared to indicate additional pieces of data to be shared.
342 destrepo/.hg/shared to indicate additional pieces of data to be shared.
343 """
343 """
344 default = defaultpath or sourcerepo.ui.config('paths', 'default')
344 default = defaultpath or sourcerepo.ui.config('paths', 'default')
345 if default:
345 if default:
346 template = ('[paths]\n'
346 template = ('[paths]\n'
347 'default = %s\n')
347 'default = %s\n')
348 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
348 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
349
349
350 with destrepo.wlock():
350 with destrepo.wlock():
351 if bookmarks:
351 if bookmarks:
352 destrepo.vfs.write('shared', sharedbookmarks + '\n')
352 destrepo.vfs.write('shared', sharedbookmarks + '\n')
353
353
354 def _postshareupdate(repo, update, checkout=None):
354 def _postshareupdate(repo, update, checkout=None):
355 """Maybe perform a working directory update after a shared repo is created.
355 """Maybe perform a working directory update after a shared repo is created.
356
356
357 ``update`` can be a boolean or a revision to update to.
357 ``update`` can be a boolean or a revision to update to.
358 """
358 """
359 if not update:
359 if not update:
360 return
360 return
361
361
362 repo.ui.status(_("updating working directory\n"))
362 repo.ui.status(_("updating working directory\n"))
363 if update is not True:
363 if update is not True:
364 checkout = update
364 checkout = update
365 for test in (checkout, 'default', 'tip'):
365 for test in (checkout, 'default', 'tip'):
366 if test is None:
366 if test is None:
367 continue
367 continue
368 try:
368 try:
369 uprev = repo.lookup(test)
369 uprev = repo.lookup(test)
370 break
370 break
371 except error.RepoLookupError:
371 except error.RepoLookupError:
372 continue
372 continue
373 _update(repo, uprev)
373 _update(repo, uprev)
374
374
375 def copystore(ui, srcrepo, destpath):
375 def copystore(ui, srcrepo, destpath):
376 '''copy files from store of srcrepo in destpath
376 '''copy files from store of srcrepo in destpath
377
377
378 returns destlock
378 returns destlock
379 '''
379 '''
380 destlock = None
380 destlock = None
381 try:
381 try:
382 hardlink = None
382 hardlink = None
383 topic = _('linking') if hardlink else _('copying')
383 topic = _('linking') if hardlink else _('copying')
384 - progress = ui.makeprogress(topic)
384 + with ui.makeprogress(topic) as progress:
385 num = 0
385 num = 0
386 srcpublishing = srcrepo.publishing()
386 srcpublishing = srcrepo.publishing()
387 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
387 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
388 dstvfs = vfsmod.vfs(destpath)
388 dstvfs = vfsmod.vfs(destpath)
389 for f in srcrepo.store.copylist():
389 for f in srcrepo.store.copylist():
390 if srcpublishing and f.endswith('phaseroots'):
390 if srcpublishing and f.endswith('phaseroots'):
391 continue
391 continue
392 dstbase = os.path.dirname(f)
392 dstbase = os.path.dirname(f)
393 if dstbase and not dstvfs.exists(dstbase):
393 if dstbase and not dstvfs.exists(dstbase):
394 dstvfs.mkdir(dstbase)
394 dstvfs.mkdir(dstbase)
395 if srcvfs.exists(f):
395 if srcvfs.exists(f):
396 if f.endswith('data'):
396 if f.endswith('data'):
397 # 'dstbase' may be empty (e.g. revlog format 0)
397 # 'dstbase' may be empty (e.g. revlog format 0)
398 lockfile = os.path.join(dstbase, "lock")
398 lockfile = os.path.join(dstbase, "lock")
399 # lock to avoid premature writing to the target
399 # lock to avoid premature writing to the target
400 destlock = lock.lock(dstvfs, lockfile)
400 destlock = lock.lock(dstvfs, lockfile)
401 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
401 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
402 hardlink, progress)
402 hardlink, progress)
403 num += n
403 num += n
404 if hardlink:
404 if hardlink:
405 ui.debug("linked %d files\n" % num)
405 ui.debug("linked %d files\n" % num)
406 else:
406 else:
407 ui.debug("copied %d files\n" % num)
407 ui.debug("copied %d files\n" % num)
408 - progress.complete()
409 return destlock
408 return destlock
410 except: # re-raises
409 except: # re-raises
411 release(destlock)
410 release(destlock)
412 raise
411 raise
413
412
414 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
413 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
415 rev=None, update=True, stream=False):
414 rev=None, update=True, stream=False):
416 """Perform a clone using a shared repo.
415 """Perform a clone using a shared repo.
417
416
418 The store for the repository will be located at <sharepath>/.hg. The
417 The store for the repository will be located at <sharepath>/.hg. The
419 specified revisions will be cloned or pulled from "source". A shared repo
418 specified revisions will be cloned or pulled from "source". A shared repo
420 will be created at "dest" and a working copy will be created if "update" is
419 will be created at "dest" and a working copy will be created if "update" is
421 True.
420 True.
422 """
421 """
423 revs = None
422 revs = None
424 if rev:
423 if rev:
425 if not srcpeer.capable('lookup'):
424 if not srcpeer.capable('lookup'):
426 raise error.Abort(_("src repository does not support "
425 raise error.Abort(_("src repository does not support "
427 "revision lookup and so doesn't "
426 "revision lookup and so doesn't "
428 "support clone by revision"))
427 "support clone by revision"))
429
428
430 # TODO this is batchable.
429 # TODO this is batchable.
431 remoterevs = []
430 remoterevs = []
432 for r in rev:
431 for r in rev:
433 with srcpeer.commandexecutor() as e:
432 with srcpeer.commandexecutor() as e:
434 remoterevs.append(e.callcommand('lookup', {
433 remoterevs.append(e.callcommand('lookup', {
435 'key': r,
434 'key': r,
436 }).result())
435 }).result())
437 revs = remoterevs
436 revs = remoterevs
438
437
439 # Obtain a lock before checking for or cloning the pooled repo otherwise
438 # Obtain a lock before checking for or cloning the pooled repo otherwise
440 # 2 clients may race creating or populating it.
439 # 2 clients may race creating or populating it.
441 pooldir = os.path.dirname(sharepath)
440 pooldir = os.path.dirname(sharepath)
442 # lock class requires the directory to exist.
441 # lock class requires the directory to exist.
443 try:
442 try:
444 util.makedir(pooldir, False)
443 util.makedir(pooldir, False)
445 except OSError as e:
444 except OSError as e:
446 if e.errno != errno.EEXIST:
445 if e.errno != errno.EEXIST:
447 raise
446 raise
448
447
449 poolvfs = vfsmod.vfs(pooldir)
448 poolvfs = vfsmod.vfs(pooldir)
450 basename = os.path.basename(sharepath)
449 basename = os.path.basename(sharepath)
451
450
452 with lock.lock(poolvfs, '%s.lock' % basename):
451 with lock.lock(poolvfs, '%s.lock' % basename):
453 if os.path.exists(sharepath):
452 if os.path.exists(sharepath):
454 ui.status(_('(sharing from existing pooled repository %s)\n') %
453 ui.status(_('(sharing from existing pooled repository %s)\n') %
455 basename)
454 basename)
456 else:
455 else:
457 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
456 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
458 # Always use pull mode because hardlinks in share mode don't work
457 # Always use pull mode because hardlinks in share mode don't work
459 # well. Never update because working copies aren't necessary in
458 # well. Never update because working copies aren't necessary in
460 # share mode.
459 # share mode.
461 clone(ui, peeropts, source, dest=sharepath, pull=True,
460 clone(ui, peeropts, source, dest=sharepath, pull=True,
462 revs=rev, update=False, stream=stream)
461 revs=rev, update=False, stream=stream)
463
462
464 # Resolve the value to put in [paths] section for the source.
463 # Resolve the value to put in [paths] section for the source.
465 if islocal(source):
464 if islocal(source):
466 defaultpath = os.path.abspath(util.urllocalpath(source))
465 defaultpath = os.path.abspath(util.urllocalpath(source))
467 else:
466 else:
468 defaultpath = source
467 defaultpath = source
469
468
470 sharerepo = repository(ui, path=sharepath)
469 sharerepo = repository(ui, path=sharepath)
471 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
470 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
472 defaultpath=defaultpath)
471 defaultpath=defaultpath)
473
472
474 # We need to perform a pull against the dest repo to fetch bookmarks
473 # We need to perform a pull against the dest repo to fetch bookmarks
475 # and other non-store data that isn't shared by default. In the case of
474 # and other non-store data that isn't shared by default. In the case of
476 # non-existing shared repo, this means we pull from the remote twice. This
475 # non-existing shared repo, this means we pull from the remote twice. This
477 # is a bit weird. But at the time it was implemented, there wasn't an easy
476 # is a bit weird. But at the time it was implemented, there wasn't an easy
478 # way to pull just non-changegroup data.
477 # way to pull just non-changegroup data.
479 destrepo = repository(ui, path=dest)
478 destrepo = repository(ui, path=dest)
480 exchange.pull(destrepo, srcpeer, heads=revs)
479 exchange.pull(destrepo, srcpeer, heads=revs)
481
480
482 _postshareupdate(destrepo, update)
481 _postshareupdate(destrepo, update)
483
482
484 return srcpeer, peer(ui, peeropts, dest)
483 return srcpeer, peer(ui, peeropts, dest)
485
484
486 # Recomputing branch cache might be slow on big repos,
485 # Recomputing branch cache might be slow on big repos,
487 # so just copy it
486 # so just copy it
488 def _copycache(srcrepo, dstcachedir, fname):
487 def _copycache(srcrepo, dstcachedir, fname):
489 """copy a cache from srcrepo to destcachedir (if it exists)"""
488 """copy a cache from srcrepo to destcachedir (if it exists)"""
490 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
489 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
491 dstbranchcache = os.path.join(dstcachedir, fname)
490 dstbranchcache = os.path.join(dstcachedir, fname)
492 if os.path.exists(srcbranchcache):
491 if os.path.exists(srcbranchcache):
493 if not os.path.exists(dstcachedir):
492 if not os.path.exists(dstcachedir):
494 os.mkdir(dstcachedir)
493 os.mkdir(dstcachedir)
495 util.copyfile(srcbranchcache, dstbranchcache)
494 util.copyfile(srcbranchcache, dstbranchcache)
496
495
497 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
496 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
498 update=True, stream=False, branch=None, shareopts=None):
497 update=True, stream=False, branch=None, shareopts=None):
499 """Make a copy of an existing repository.
498 """Make a copy of an existing repository.
500
499
501 Create a copy of an existing repository in a new directory. The
500 Create a copy of an existing repository in a new directory. The
502 source and destination are URLs, as passed to the repository
501 source and destination are URLs, as passed to the repository
503 function. Returns a pair of repository peers, the source and
502 function. Returns a pair of repository peers, the source and
504 newly created destination.
503 newly created destination.
505
504
506 The location of the source is added to the new repository's
505 The location of the source is added to the new repository's
507 .hg/hgrc file, as the default to be used for future pulls and
506 .hg/hgrc file, as the default to be used for future pulls and
508 pushes.
507 pushes.
509
508
510 If an exception is raised, the partly cloned/updated destination
509 If an exception is raised, the partly cloned/updated destination
511 repository will be deleted.
510 repository will be deleted.
512
511
513 Arguments:
512 Arguments:
514
513
515 source: repository object or URL
514 source: repository object or URL
516
515
517 dest: URL of destination repository to create (defaults to base
516 dest: URL of destination repository to create (defaults to base
518 name of source repository)
517 name of source repository)
519
518
520 pull: always pull from source repository, even in local case or if the
519 pull: always pull from source repository, even in local case or if the
521 server prefers streaming
520 server prefers streaming
522
521
523 stream: stream raw data uncompressed from repository (fast over
522 stream: stream raw data uncompressed from repository (fast over
524 LAN, slow over WAN)
523 LAN, slow over WAN)
525
524
526 revs: revision to clone up to (implies pull=True)
525 revs: revision to clone up to (implies pull=True)
527
526
528 update: update working directory after clone completes, if
527 update: update working directory after clone completes, if
529 destination is local repository (True means update to default rev,
528 destination is local repository (True means update to default rev,
530 anything else is treated as a revision)
529 anything else is treated as a revision)
531
530
532 branch: branches to clone
531 branch: branches to clone
533
532
534 shareopts: dict of options to control auto sharing behavior. The "pool" key
533 shareopts: dict of options to control auto sharing behavior. The "pool" key
535 activates auto sharing mode and defines the directory for stores. The
534 activates auto sharing mode and defines the directory for stores. The
536 "mode" key determines how to construct the directory name of the shared
535 "mode" key determines how to construct the directory name of the shared
537 repository. "identity" means the name is derived from the node of the first
536 repository. "identity" means the name is derived from the node of the first
538 changeset in the repository. "remote" means the name is derived from the
537 changeset in the repository. "remote" means the name is derived from the
539 remote's path/URL. Defaults to "identity."
538 remote's path/URL. Defaults to "identity."
540 """
539 """
541
540
542 if isinstance(source, bytes):
541 if isinstance(source, bytes):
543 origsource = ui.expandpath(source)
542 origsource = ui.expandpath(source)
544 source, branches = parseurl(origsource, branch)
543 source, branches = parseurl(origsource, branch)
545 srcpeer = peer(ui, peeropts, source)
544 srcpeer = peer(ui, peeropts, source)
546 else:
545 else:
547 srcpeer = source.peer() # in case we were called with a localrepo
546 srcpeer = source.peer() # in case we were called with a localrepo
548 branches = (None, branch or [])
547 branches = (None, branch or [])
549 origsource = source = srcpeer.url()
548 origsource = source = srcpeer.url()
550 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
549 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
551
550
552 if dest is None:
551 if dest is None:
553 dest = defaultdest(source)
552 dest = defaultdest(source)
554 if dest:
553 if dest:
555 ui.status(_("destination directory: %s\n") % dest)
554 ui.status(_("destination directory: %s\n") % dest)
556 else:
555 else:
557 dest = ui.expandpath(dest)
556 dest = ui.expandpath(dest)
558
557
559 dest = util.urllocalpath(dest)
558 dest = util.urllocalpath(dest)
560 source = util.urllocalpath(source)
559 source = util.urllocalpath(source)
561
560
562 if not dest:
561 if not dest:
563 raise error.Abort(_("empty destination path is not valid"))
562 raise error.Abort(_("empty destination path is not valid"))
564
563
565 destvfs = vfsmod.vfs(dest, expandpath=True)
564 destvfs = vfsmod.vfs(dest, expandpath=True)
566 if destvfs.lexists():
565 if destvfs.lexists():
567 if not destvfs.isdir():
566 if not destvfs.isdir():
568 raise error.Abort(_("destination '%s' already exists") % dest)
567 raise error.Abort(_("destination '%s' already exists") % dest)
569 elif destvfs.listdir():
568 elif destvfs.listdir():
570 raise error.Abort(_("destination '%s' is not empty") % dest)
569 raise error.Abort(_("destination '%s' is not empty") % dest)
571
570
572 shareopts = shareopts or {}
571 shareopts = shareopts or {}
573 sharepool = shareopts.get('pool')
572 sharepool = shareopts.get('pool')
574 sharenamemode = shareopts.get('mode')
573 sharenamemode = shareopts.get('mode')
575 if sharepool and islocal(dest):
574 if sharepool and islocal(dest):
576 sharepath = None
575 sharepath = None
577 if sharenamemode == 'identity':
576 if sharenamemode == 'identity':
578 # Resolve the name from the initial changeset in the remote
577 # Resolve the name from the initial changeset in the remote
579 # repository. This returns nullid when the remote is empty. It
578 # repository. This returns nullid when the remote is empty. It
580 # raises RepoLookupError if revision 0 is filtered or otherwise
579 # raises RepoLookupError if revision 0 is filtered or otherwise
581 # not available. If we fail to resolve, sharing is not enabled.
580 # not available. If we fail to resolve, sharing is not enabled.
582 try:
581 try:
583 with srcpeer.commandexecutor() as e:
582 with srcpeer.commandexecutor() as e:
584 rootnode = e.callcommand('lookup', {
583 rootnode = e.callcommand('lookup', {
585 'key': '0',
584 'key': '0',
586 }).result()
585 }).result()
587
586
588 if rootnode != node.nullid:
587 if rootnode != node.nullid:
589 sharepath = os.path.join(sharepool, node.hex(rootnode))
588 sharepath = os.path.join(sharepool, node.hex(rootnode))
590 else:
589 else:
591 ui.status(_('(not using pooled storage: '
590 ui.status(_('(not using pooled storage: '
592 'remote appears to be empty)\n'))
591 'remote appears to be empty)\n'))
593 except error.RepoLookupError:
592 except error.RepoLookupError:
594 ui.status(_('(not using pooled storage: '
593 ui.status(_('(not using pooled storage: '
595 'unable to resolve identity of remote)\n'))
594 'unable to resolve identity of remote)\n'))
596 elif sharenamemode == 'remote':
595 elif sharenamemode == 'remote':
597 sharepath = os.path.join(
596 sharepath = os.path.join(
598 sharepool, node.hex(hashlib.sha1(source).digest()))
597 sharepool, node.hex(hashlib.sha1(source).digest()))
599 else:
598 else:
600 raise error.Abort(_('unknown share naming mode: %s') %
599 raise error.Abort(_('unknown share naming mode: %s') %
601 sharenamemode)
600 sharenamemode)
602
601
603 if sharepath:
602 if sharepath:
604 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
603 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
605 dest, pull=pull, rev=revs, update=update,
604 dest, pull=pull, rev=revs, update=update,
606 stream=stream)
605 stream=stream)
607
606
608 srclock = destlock = cleandir = None
607 srclock = destlock = cleandir = None
609 srcrepo = srcpeer.local()
608 srcrepo = srcpeer.local()
610 try:
609 try:
611 abspath = origsource
610 abspath = origsource
612 if islocal(origsource):
611 if islocal(origsource):
613 abspath = os.path.abspath(util.urllocalpath(origsource))
612 abspath = os.path.abspath(util.urllocalpath(origsource))
614
613
615 if islocal(dest):
614 if islocal(dest):
616 cleandir = dest
615 cleandir = dest
617
616
618 copy = False
617 copy = False
619 if (srcrepo and srcrepo.cancopy() and islocal(dest)
618 if (srcrepo and srcrepo.cancopy() and islocal(dest)
620 and not phases.hassecret(srcrepo)):
619 and not phases.hassecret(srcrepo)):
621 copy = not pull and not revs
620 copy = not pull and not revs
622
621
623 if copy:
622 if copy:
624 try:
623 try:
625 # we use a lock here because if we race with commit, we
624 # we use a lock here because if we race with commit, we
626 # can end up with extra data in the cloned revlogs that's
625 # can end up with extra data in the cloned revlogs that's
627 # not pointed to by changesets, thus causing verify to
626 # not pointed to by changesets, thus causing verify to
628 # fail
627 # fail
629 srclock = srcrepo.lock(wait=False)
628 srclock = srcrepo.lock(wait=False)
630 except error.LockError:
629 except error.LockError:
631 copy = False
630 copy = False
632
631
633 if copy:
632 if copy:
634 srcrepo.hook('preoutgoing', throw=True, source='clone')
633 srcrepo.hook('preoutgoing', throw=True, source='clone')
635 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
634 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
636 if not os.path.exists(dest):
635 if not os.path.exists(dest):
637 util.makedirs(dest)
636 util.makedirs(dest)
638 else:
637 else:
639 # only clean up directories we create ourselves
638 # only clean up directories we create ourselves
640 cleandir = hgdir
639 cleandir = hgdir
641 try:
640 try:
642 destpath = hgdir
641 destpath = hgdir
643 util.makedir(destpath, notindexed=True)
642 util.makedir(destpath, notindexed=True)
644 except OSError as inst:
643 except OSError as inst:
645 if inst.errno == errno.EEXIST:
644 if inst.errno == errno.EEXIST:
646 cleandir = None
645 cleandir = None
647 raise error.Abort(_("destination '%s' already exists")
646 raise error.Abort(_("destination '%s' already exists")
648 % dest)
647 % dest)
649 raise
648 raise
650
649
651 destlock = copystore(ui, srcrepo, destpath)
650 destlock = copystore(ui, srcrepo, destpath)
652 # copy bookmarks over
651 # copy bookmarks over
653 srcbookmarks = srcrepo.vfs.join('bookmarks')
652 srcbookmarks = srcrepo.vfs.join('bookmarks')
654 dstbookmarks = os.path.join(destpath, 'bookmarks')
653 dstbookmarks = os.path.join(destpath, 'bookmarks')
655 if os.path.exists(srcbookmarks):
654 if os.path.exists(srcbookmarks):
656 util.copyfile(srcbookmarks, dstbookmarks)
655 util.copyfile(srcbookmarks, dstbookmarks)
657
656
658 dstcachedir = os.path.join(destpath, 'cache')
657 dstcachedir = os.path.join(destpath, 'cache')
659 for cache in cacheutil.cachetocopy(srcrepo):
658 for cache in cacheutil.cachetocopy(srcrepo):
660 _copycache(srcrepo, dstcachedir, cache)
659 _copycache(srcrepo, dstcachedir, cache)
661
660
662 # we need to re-init the repo after manually copying the data
661 # we need to re-init the repo after manually copying the data
663 # into it
662 # into it
664 destpeer = peer(srcrepo, peeropts, dest)
663 destpeer = peer(srcrepo, peeropts, dest)
665 srcrepo.hook('outgoing', source='clone',
664 srcrepo.hook('outgoing', source='clone',
666 node=node.hex(node.nullid))
665 node=node.hex(node.nullid))
667 else:
666 else:
668 try:
667 try:
669 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
668 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
670 # only pass ui when no srcrepo
669 # only pass ui when no srcrepo
671 except OSError as inst:
670 except OSError as inst:
672 if inst.errno == errno.EEXIST:
671 if inst.errno == errno.EEXIST:
673 cleandir = None
672 cleandir = None
674 raise error.Abort(_("destination '%s' already exists")
673 raise error.Abort(_("destination '%s' already exists")
675 % dest)
674 % dest)
676 raise
675 raise
677
676
678 if revs:
677 if revs:
679 if not srcpeer.capable('lookup'):
678 if not srcpeer.capable('lookup'):
680 raise error.Abort(_("src repository does not support "
679 raise error.Abort(_("src repository does not support "
681 "revision lookup and so doesn't "
680 "revision lookup and so doesn't "
682 "support clone by revision"))
681 "support clone by revision"))
683
682
684 # TODO this is batchable.
683 # TODO this is batchable.
685 remoterevs = []
684 remoterevs = []
686 for rev in revs:
685 for rev in revs:
687 with srcpeer.commandexecutor() as e:
686 with srcpeer.commandexecutor() as e:
688 remoterevs.append(e.callcommand('lookup', {
687 remoterevs.append(e.callcommand('lookup', {
689 'key': rev,
688 'key': rev,
690 }).result())
689 }).result())
691 revs = remoterevs
690 revs = remoterevs
692
691
693 checkout = revs[0]
692 checkout = revs[0]
694 else:
693 else:
695 revs = None
694 revs = None
696 local = destpeer.local()
695 local = destpeer.local()
697 if local:
696 if local:
698 u = util.url(abspath)
697 u = util.url(abspath)
699 defaulturl = bytes(u)
698 defaulturl = bytes(u)
700 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
699 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
701 if not stream:
700 if not stream:
702 if pull:
701 if pull:
703 stream = False
702 stream = False
704 else:
703 else:
705 stream = None
704 stream = None
706 # internal config: ui.quietbookmarkmove
705 # internal config: ui.quietbookmarkmove
707 overrides = {('ui', 'quietbookmarkmove'): True}
706 overrides = {('ui', 'quietbookmarkmove'): True}
708 with local.ui.configoverride(overrides, 'clone'):
707 with local.ui.configoverride(overrides, 'clone'):
709 exchange.pull(local, srcpeer, revs,
708 exchange.pull(local, srcpeer, revs,
710 streamclonerequested=stream)
709 streamclonerequested=stream)
711 elif srcrepo:
710 elif srcrepo:
712 exchange.push(srcrepo, destpeer, revs=revs,
711 exchange.push(srcrepo, destpeer, revs=revs,
713 bookmarks=srcrepo._bookmarks.keys())
712 bookmarks=srcrepo._bookmarks.keys())
714 else:
713 else:
715 raise error.Abort(_("clone from remote to remote not supported")
714 raise error.Abort(_("clone from remote to remote not supported")
716 )
715 )
717
716
718 cleandir = None
717 cleandir = None
719
718
720 destrepo = destpeer.local()
719 destrepo = destpeer.local()
721 if destrepo:
720 if destrepo:
722 template = uimod.samplehgrcs['cloned']
721 template = uimod.samplehgrcs['cloned']
723 u = util.url(abspath)
722 u = util.url(abspath)
724 u.passwd = None
723 u.passwd = None
725 defaulturl = bytes(u)
724 defaulturl = bytes(u)
726 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
725 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
727 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
726 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
728
727
729 if ui.configbool('experimental', 'remotenames'):
728 if ui.configbool('experimental', 'remotenames'):
730 logexchange.pullremotenames(destrepo, srcpeer)
729 logexchange.pullremotenames(destrepo, srcpeer)
731
730
732 if update:
731 if update:
733 if update is not True:
732 if update is not True:
734 with srcpeer.commandexecutor() as e:
733 with srcpeer.commandexecutor() as e:
735 checkout = e.callcommand('lookup', {
734 checkout = e.callcommand('lookup', {
736 'key': update,
735 'key': update,
737 }).result()
736 }).result()
738
737
739 uprev = None
738 uprev = None
740 status = None
739 status = None
741 if checkout is not None:
740 if checkout is not None:
742 # Some extensions (at least hg-git and hg-subversion) have
741 # Some extensions (at least hg-git and hg-subversion) have
743 # a peer.lookup() implementation that returns a name instead
742 # a peer.lookup() implementation that returns a name instead
744 # of a nodeid. We work around it here until we've figured
743 # of a nodeid. We work around it here until we've figured
745 # out a better solution.
744 # out a better solution.
746 if len(checkout) == 20 and checkout in destrepo:
745 if len(checkout) == 20 and checkout in destrepo:
747 uprev = checkout
746 uprev = checkout
748 elif scmutil.isrevsymbol(destrepo, checkout):
747 elif scmutil.isrevsymbol(destrepo, checkout):
749 uprev = scmutil.revsymbol(destrepo, checkout).node()
748 uprev = scmutil.revsymbol(destrepo, checkout).node()
750 else:
749 else:
751 if update is not True:
750 if update is not True:
752 try:
751 try:
753 uprev = destrepo.lookup(update)
752 uprev = destrepo.lookup(update)
754 except error.RepoLookupError:
753 except error.RepoLookupError:
755 pass
754 pass
756 if uprev is None:
755 if uprev is None:
757 try:
756 try:
758 uprev = destrepo._bookmarks['@']
757 uprev = destrepo._bookmarks['@']
759 update = '@'
758 update = '@'
760 bn = destrepo[uprev].branch()
759 bn = destrepo[uprev].branch()
761 if bn == 'default':
760 if bn == 'default':
762 status = _("updating to bookmark @\n")
761 status = _("updating to bookmark @\n")
763 else:
762 else:
764 status = (_("updating to bookmark @ on branch %s\n")
763 status = (_("updating to bookmark @ on branch %s\n")
765 % bn)
764 % bn)
766 except KeyError:
765 except KeyError:
767 try:
766 try:
768 uprev = destrepo.branchtip('default')
767 uprev = destrepo.branchtip('default')
769 except error.RepoLookupError:
768 except error.RepoLookupError:
770 uprev = destrepo.lookup('tip')
769 uprev = destrepo.lookup('tip')
771 if not status:
770 if not status:
772 bn = destrepo[uprev].branch()
771 bn = destrepo[uprev].branch()
773 status = _("updating to branch %s\n") % bn
772 status = _("updating to branch %s\n") % bn
774 destrepo.ui.status(status)
773 destrepo.ui.status(status)
775 _update(destrepo, uprev)
774 _update(destrepo, uprev)
776 if update in destrepo._bookmarks:
775 if update in destrepo._bookmarks:
777 bookmarks.activate(destrepo, update)
776 bookmarks.activate(destrepo, update)
778 finally:
777 finally:
779 release(srclock, destlock)
778 release(srclock, destlock)
780 if cleandir is not None:
779 if cleandir is not None:
781 shutil.rmtree(cleandir, True)
780 shutil.rmtree(cleandir, True)
782 if srcpeer is not None:
781 if srcpeer is not None:
783 srcpeer.close()
782 srcpeer.close()
784 return srcpeer, destpeer
783 return srcpeer, destpeer
785
784
786 def _showstats(repo, stats, quietempty=False):
785 def _showstats(repo, stats, quietempty=False):
787 if quietempty and stats.isempty():
786 if quietempty and stats.isempty():
788 return
787 return
789 repo.ui.status(_("%d files updated, %d files merged, "
788 repo.ui.status(_("%d files updated, %d files merged, "
790 "%d files removed, %d files unresolved\n") % (
789 "%d files removed, %d files unresolved\n") % (
791 stats.updatedcount, stats.mergedcount,
790 stats.updatedcount, stats.mergedcount,
792 stats.removedcount, stats.unresolvedcount))
791 stats.removedcount, stats.unresolvedcount))
793
792
794 def updaterepo(repo, node, overwrite, updatecheck=None):
793 def updaterepo(repo, node, overwrite, updatecheck=None):
795 """Update the working directory to node.
794 """Update the working directory to node.
796
795
797 When overwrite is set, changes are clobbered, merged else
796 When overwrite is set, changes are clobbered, merged else
798
797
799 returns stats (see pydoc mercurial.merge.applyupdates)"""
798 returns stats (see pydoc mercurial.merge.applyupdates)"""
800 return mergemod.update(repo, node, False, overwrite,
799 return mergemod.update(repo, node, False, overwrite,
801 labels=['working copy', 'destination'],
800 labels=['working copy', 'destination'],
802 updatecheck=updatecheck)
801 updatecheck=updatecheck)
803
802
804 def update(repo, node, quietempty=False, updatecheck=None):
803 def update(repo, node, quietempty=False, updatecheck=None):
805 """update the working directory to node"""
804 """update the working directory to node"""
806 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
805 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
807 _showstats(repo, stats, quietempty)
806 _showstats(repo, stats, quietempty)
808 if stats.unresolvedcount:
807 if stats.unresolvedcount:
809 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
808 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
810 return stats.unresolvedcount > 0
809 return stats.unresolvedcount > 0
811
810
812 # naming conflict in clone()
811 # naming conflict in clone()
813 _update = update
812 _update = update
814
813
815 def clean(repo, node, show_stats=True, quietempty=False):
814 def clean(repo, node, show_stats=True, quietempty=False):
816 """forcibly switch the working directory to node, clobbering changes"""
815 """forcibly switch the working directory to node, clobbering changes"""
817 stats = updaterepo(repo, node, True)
816 stats = updaterepo(repo, node, True)
818 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
817 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
819 if show_stats:
818 if show_stats:
820 _showstats(repo, stats, quietempty)
819 _showstats(repo, stats, quietempty)
821 return stats.unresolvedcount > 0
820 return stats.unresolvedcount > 0
822
821
823 # naming conflict in updatetotally()
822 # naming conflict in updatetotally()
824 _clean = clean
823 _clean = clean
825
824
826 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
825 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
827 """Update the working directory with extra care for non-file components
826 """Update the working directory with extra care for non-file components
828
827
829 This takes care of non-file components below:
828 This takes care of non-file components below:
830
829
831 :bookmark: might be advanced or (in)activated
830 :bookmark: might be advanced or (in)activated
832
831
833 This takes arguments below:
832 This takes arguments below:
834
833
835 :checkout: to which revision the working directory is updated
834 :checkout: to which revision the working directory is updated
836 :brev: a name, which might be a bookmark to be activated after updating
835 :brev: a name, which might be a bookmark to be activated after updating
837 :clean: whether changes in the working directory can be discarded
836 :clean: whether changes in the working directory can be discarded
838 :updatecheck: how to deal with a dirty working directory
837 :updatecheck: how to deal with a dirty working directory
839
838
840 Valid values for updatecheck are (None => linear):
839 Valid values for updatecheck are (None => linear):
841
840
842 * abort: abort if the working directory is dirty
841 * abort: abort if the working directory is dirty
843 * none: don't check (merge working directory changes into destination)
842 * none: don't check (merge working directory changes into destination)
844 * linear: check that update is linear before merging working directory
843 * linear: check that update is linear before merging working directory
845 changes into destination
844 changes into destination
846 * noconflict: check that the update does not result in file merges
845 * noconflict: check that the update does not result in file merges
847
846
848 This returns whether conflict is detected at updating or not.
847 This returns whether conflict is detected at updating or not.
849 """
848 """
850 if updatecheck is None:
849 if updatecheck is None:
851 updatecheck = ui.config('commands', 'update.check')
850 updatecheck = ui.config('commands', 'update.check')
852 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
851 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
853 # If not configured, or invalid value configured
852 # If not configured, or invalid value configured
854 updatecheck = 'linear'
853 updatecheck = 'linear'
855 with repo.wlock():
854 with repo.wlock():
856 movemarkfrom = None
855 movemarkfrom = None
857 warndest = False
856 warndest = False
858 if checkout is None:
857 if checkout is None:
859 updata = destutil.destupdate(repo, clean=clean)
858 updata = destutil.destupdate(repo, clean=clean)
860 checkout, movemarkfrom, brev = updata
859 checkout, movemarkfrom, brev = updata
861 warndest = True
860 warndest = True
862
861
863 if clean:
862 if clean:
864 ret = _clean(repo, checkout)
863 ret = _clean(repo, checkout)
865 else:
864 else:
866 if updatecheck == 'abort':
865 if updatecheck == 'abort':
867 cmdutil.bailifchanged(repo, merge=False)
866 cmdutil.bailifchanged(repo, merge=False)
868 updatecheck = 'none'
867 updatecheck = 'none'
869 ret = _update(repo, checkout, updatecheck=updatecheck)
868 ret = _update(repo, checkout, updatecheck=updatecheck)
870
869
871 if not ret and movemarkfrom:
870 if not ret and movemarkfrom:
872 if movemarkfrom == repo['.'].node():
871 if movemarkfrom == repo['.'].node():
873 pass # no-op update
872 pass # no-op update
874 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
873 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
875 b = ui.label(repo._activebookmark, 'bookmarks.active')
874 b = ui.label(repo._activebookmark, 'bookmarks.active')
876 ui.status(_("updating bookmark %s\n") % b)
875 ui.status(_("updating bookmark %s\n") % b)
877 else:
876 else:
878 # this can happen with a non-linear update
877 # this can happen with a non-linear update
879 b = ui.label(repo._activebookmark, 'bookmarks')
878 b = ui.label(repo._activebookmark, 'bookmarks')
880 ui.status(_("(leaving bookmark %s)\n") % b)
879 ui.status(_("(leaving bookmark %s)\n") % b)
881 bookmarks.deactivate(repo)
880 bookmarks.deactivate(repo)
882 elif brev in repo._bookmarks:
881 elif brev in repo._bookmarks:
883 if brev != repo._activebookmark:
882 if brev != repo._activebookmark:
884 b = ui.label(brev, 'bookmarks.active')
883 b = ui.label(brev, 'bookmarks.active')
885 ui.status(_("(activating bookmark %s)\n") % b)
884 ui.status(_("(activating bookmark %s)\n") % b)
886 bookmarks.activate(repo, brev)
885 bookmarks.activate(repo, brev)
887 elif brev:
886 elif brev:
888 if repo._activebookmark:
887 if repo._activebookmark:
889 b = ui.label(repo._activebookmark, 'bookmarks')
888 b = ui.label(repo._activebookmark, 'bookmarks')
890 ui.status(_("(leaving bookmark %s)\n") % b)
889 ui.status(_("(leaving bookmark %s)\n") % b)
891 bookmarks.deactivate(repo)
890 bookmarks.deactivate(repo)
892
891
893 if warndest:
892 if warndest:
894 destutil.statusotherdests(ui, repo)
893 destutil.statusotherdests(ui, repo)
895
894
896 return ret
895 return ret
897
896
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

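# Illustrative sketch (not part of the original file): a minimal caller of
# merge().  'repo' and 'othernode' are hypothetical values supplied by the
# caller; the boolean returned by merge() reports whether unresolved
# conflicts remain.
def _examplemerge(repo, othernode):
    hadconflicts = merge(repo, othernode, force=False, remind=False)
    return not hadconflicts  # True when the merge completed cleanly
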
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

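# Illustrative sketch (not part of the original file): a hypothetical caller
# building the ui that will drive an outbound connection.  Only the
# whitelisted settings copied above (ssh, bundle, auth, proxy, cacerts)
# survive into the returned ui; repo-specific configuration is dropped.
def _exampleremoteui(repo, opts):
    dst = remoteui(repo, opts)
    return dst
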
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
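
# Illustrative sketch (not part of the original file): the usage pattern the
# fetch() docstring above describes, e.g. in a hypothetical long-running
# server process where 'cache' is created once and fetch() runs per request.
def _examplefetch(cache):
    repo, created = cache.fetch()
    if created:
        # the on-disk state changed, so a fresh localrepository was built
        repo.ui.debug('repository changed on disk; reloaded\n')
    return repo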