outgoing: avoid repo.lookup() for converting revnum to nodeid...
Martin von Zweigbergk
r37329:70c52800 default
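The change itself lies beyond the portion of the hunk shown below, but the idea in the title is straightforward: when a revision number is already known, asking the changelog index for its node directly is cheaper than routing through repo.lookup(), which has to parse a generic key. A minimal, hypothetical sketch of that pattern (not the actual hunk from this revision; revnum_to_node is an illustrative helper name, and repo is assumed to be a local repository object):

    # Illustration only -- not the change made in this revision.
    def revnum_to_node(repo, rev):
        # older pattern: format the revnum as a key and let lookup() parse it
        #   return repo.lookup(b'%d' % rev)
        # preferred: index the changelog by revision number directly
        return repo.changelog.node(rev)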
@@ -1,1143 +1,1143 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

from .utils import (
    stringutil,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in revs]
                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
      changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
901 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
901 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
902 other = peer(repo, opts, source)
902 other = peer(repo, opts, source)
903 ui.status(_('comparing with %s\n') % util.hidepassword(source))
903 ui.status(_('comparing with %s\n') % util.hidepassword(source))
904 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
904 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
905
905
906 if revs:
906 if revs:
907 revs = [other.lookup(rev) for rev in revs]
907 revs = [other.lookup(rev) for rev in revs]
908 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
908 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
909 revs, opts["bundle"], opts["force"])
909 revs, opts["bundle"], opts["force"])
910 try:
910 try:
911 if not chlist:
911 if not chlist:
912 ui.status(_("no changes found\n"))
912 ui.status(_("no changes found\n"))
913 return subreporecurse()
913 return subreporecurse()
914 ui.pager('incoming')
914 ui.pager('incoming')
915 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
915 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
916 buffered=buffered)
916 buffered=buffered)
917 displaychlist(other, chlist, displayer)
917 displaychlist(other, chlist, displayer)
918 displayer.close()
918 displayer.close()
919 finally:
919 finally:
920 cleanupfn()
920 cleanupfn()
921 subreporecurse()
921 subreporecurse()
922 return 0 # exit code is zero since we found incoming changes
922 return 0 # exit code is zero since we found incoming changes
923
923
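# A minimal sketch of the displaychlist callback that _incoming() above
# expects: it is handed the remote repo, the list of incoming nodes and a
# changeset displayer, and only has to render the changesets. incoming()
# below supplies its local display() function for this; gincoming uses the
# same hook with a graph-aware displayer.
#
#   def showchlist(other, chlist, displayer):
#       for n in chlist:
#           displayer.show(other[n])
#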
924 def incoming(ui, repo, source, opts):
925 def subreporecurse():
926 ret = 1
927 if opts.get('subrepos'):
928 ctx = repo[None]
929 for subpath in sorted(ctx.substate):
930 sub = ctx.sub(subpath)
931 ret = min(ret, sub.incoming(ui, source, opts))
932 return ret
933
934 def display(other, chlist, displayer):
935 limit = logcmdutil.getlimit(opts)
936 if opts.get('newest_first'):
937 chlist.reverse()
938 count = 0
939 for n in chlist:
940 if limit is not None and count >= limit:
941 break
942 parents = [p for p in other.changelog.parents(n) if p != nullid]
943 if opts.get('no_merges') and len(parents) == 2:
944 continue
945 count += 1
946 displayer.show(other[n])
947 return _incoming(display, subreporecurse, ui, repo, source, opts)
948
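# Usage sketch for incoming() above when called from an extension with a
# plain options dict. Most options are read with opts.get(), but _incoming()
# indexes opts["bundle"] and opts["force"] directly, so those keys must be
# present; the call returns 0 when incoming changesets are found.
#
#   ret = incoming(ui, repo, 'default', {'bundle': None, 'force': False})
#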
949 def _outgoing(ui, repo, dest, opts):
950 path = ui.paths.getpath(dest, default=('default-push', 'default'))
951 if not path:
952 raise error.Abort(_('default repository not configured!'),
953 hint=_("see 'hg help config.paths'"))
954 dest = path.pushloc or path.loc
955 branches = path.branch, opts.get('branch') or []
956
957 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
958 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
959 if revs:
960 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
960 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
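# revrange() yields revision numbers; repo[rev].node() maps each one directly
# to its binary nodeid, which is what findcommonoutgoing() below expects,
# without going through the more general repo.lookup().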
961
962 other = peer(repo, opts, dest)
963 outgoing = discovery.findcommonoutgoing(repo, other, revs,
964 force=opts.get('force'))
965 o = outgoing.missing
966 if not o:
967 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
968 return o, other
969
970 def outgoing(ui, repo, dest, opts):
971 def recurse():
972 ret = 1
973 if opts.get('subrepos'):
974 ctx = repo[None]
975 for subpath in sorted(ctx.substate):
976 sub = ctx.sub(subpath)
977 ret = min(ret, sub.outgoing(ui, dest, opts))
978 return ret
979
980 limit = logcmdutil.getlimit(opts)
981 o, other = _outgoing(ui, repo, dest, opts)
982 if not o:
983 cmdutil.outgoinghooks(ui, repo, other, opts, o)
984 return recurse()
985
986 if opts.get('newest_first'):
987 o.reverse()
988 ui.pager('outgoing')
989 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
990 count = 0
991 for n in o:
992 if limit is not None and count >= limit:
993 break
994 parents = [p for p in repo.changelog.parents(n) if p != nullid]
995 if opts.get('no_merges') and len(parents) == 2:
996 continue
997 count += 1
998 displayer.show(repo[n])
999 displayer.close()
1000 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1001 recurse()
1002 return 0 # exit code is zero since we found outgoing changes
1003
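# Usage sketch for the pair above, assuming an extension only wants the raw
# list of outgoing nodes: _outgoing() performs the discovery and returns
# (missing nodes, remote peer), while outgoing() layers display, paging,
# outgoing hooks and subrepo recursion on top of it.
#
#   nodes, other = _outgoing(ui, repo, 'default-push', {'force': False})
#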
1004 def verify(repo):
1005 """verify the consistency of a repository"""
1006 ret = verifymod.verify(repo)
1007
1008 # Broken subrepo references in hidden csets don't seem worth worrying about,
1009 # since they can't be pushed/pulled, and --hidden can be used if they are a
1010 # concern.
1011
1012 # pathto() is needed for -R case
1013 revs = repo.revs("filelog(%s)",
1014 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1015
1016 if revs:
1017 repo.ui.status(_('checking subrepo links\n'))
1018 for rev in revs:
1019 ctx = repo[rev]
1020 try:
1021 for subpath in ctx.substate:
1022 try:
1023 ret = (ctx.sub(subpath, allowcreate=False).verify()
1024 or ret)
1025 except error.RepoError as e:
1026 repo.ui.warn(('%s: %s\n') % (rev, e))
1027 except Exception:
1028 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1029 node.short(ctx.node()))
1030
1031 return ret
1032
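# Usage sketch for verify() above: it runs the repository-level integrity
# checks and then verifies any subrepositories referenced from .hgsubstate.
# The return value is used as the exit code of 'hg verify', so a nonzero
# result means problems were reported.
#
#   if verify(repo):
#       repo.ui.warn("verification reported problems\n")
#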
1033 def remoteui(src, opts):
1034 'build a remote ui from ui or repo and opts'
1035 if util.safehasattr(src, 'baseui'): # looks like a repository
1036 dst = src.baseui.copy() # drop repo-specific config
1037 src = src.ui # copy target options from repo
1038 else: # assume it's a global ui object
1039 dst = src.copy() # keep all global options
1040
1041 # copy ssh-specific options
1042 for o in 'ssh', 'remotecmd':
1043 v = opts.get(o) or src.config('ui', o)
1044 if v:
1045 dst.setconfig("ui", o, v, 'copied')
1046
1047 # copy bundle-specific options
1048 r = src.config('bundle', 'mainreporoot')
1049 if r:
1050 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1051
1052 # copy selected local settings to the remote ui
1053 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1054 for key, val in src.configitems(sect):
1055 dst.setconfig(sect, key, val, 'copied')
1056 v = src.config('web', 'cacerts')
1057 if v:
1058 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1059
1060 return dst
1061
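# Sketch of how remoteui() above is typically used: the returned ui copy
# carries only the settings that matter for talking to a remote (ssh and
# remotecmd, auth, host fingerprints/security, proxy, cacerts) and is what
# peer objects are built from, rather than the full local repository
# configuration.
#
#   rui = remoteui(repo, {'ssh': '', 'remotecmd': ''})
#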
1062 # Files of interest
1063 # Used to check if the repository has changed looking at mtime and size of
1064 # these files.
1065 foi = [('spath', '00changelog.i'),
1066 ('spath', 'phaseroots'), # ! phase can change content at the same size
1067 ('spath', 'obsstore'),
1068 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1069 ]
1070
1071 class cachedlocalrepo(object):
1072 """Holds a localrepository that can be cached and reused."""
1073
1074 def __init__(self, repo):
1075 """Create a new cached repo from an existing repo.
1076
1077 We assume the passed in repo was recently created. If the
1078 repo has changed between when it was created and when it was
1079 turned into a cache, it may not refresh properly.
1080 """
1081 assert isinstance(repo, localrepo.localrepository)
1082 self._repo = repo
1083 self._state, self.mtime = self._repostate()
1084 self._filtername = repo.filtername
1085
1086 def fetch(self):
1087 """Refresh (if necessary) and return a repository.
1088
1089 If the cached instance is out of date, it will be recreated
1090 automatically and returned.
1091
1092 Returns a tuple of the repo and a boolean indicating whether a new
1093 repo instance was created.
1094 """
1095 # We compare the mtimes and sizes of some well-known files to
1096 # determine if the repo changed. This is not precise, as mtimes
1097 # are susceptible to clock skew and imprecise filesystems and
1098 # file content can change while maintaining the same size.
1099
1100 state, mtime = self._repostate()
1101 if state == self._state:
1102 return self._repo, False
1103
1104 repo = repository(self._repo.baseui, self._repo.url())
1105 if self._filtername:
1106 self._repo = repo.filtered(self._filtername)
1107 else:
1108 self._repo = repo.unfiltered()
1109 self._state = state
1110 self.mtime = mtime
1111
1112 return self._repo, True
1113
1114 def _repostate(self):
1115 state = []
1116 maxmtime = -1
1117 for attr, fname in foi:
1118 prefix = getattr(self._repo, attr)
1119 p = os.path.join(prefix, fname)
1120 try:
1121 st = os.stat(p)
1122 except OSError:
1123 st = os.stat(prefix)
1124 state.append((st[stat.ST_MTIME], st.st_size))
1125 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1126
1127 return tuple(state), maxmtime
1128
1129 def copy(self):
1130 """Obtain a copy of this class instance.
1131
1132 A new localrepository instance is obtained. The new instance should be
1133 completely independent of the original.
1134 """
1135 repo = repository(self._repo.baseui, self._repo.origroot)
1136 if self._filtername:
1137 repo = repo.filtered(self._filtername)
1138 else:
1139 repo = repo.unfiltered()
1140 c = cachedlocalrepo(repo)
1141 c._state = self._state
1142 c.mtime = self.mtime
1143 return c
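# A minimal sketch of the cachedlocalrepo lifecycle, assuming "path" points at
# a local repository (hgweb keeps instances like this between requests): wrap
# a freshly loaded repo, then call fetch() before each use; fetch() hands back
# the cached repo while the files of interest above are unchanged and reloads
# it otherwise.
#
#   cached = cachedlocalrepo(repository(uimod.ui.load(), path))
#   repo, wasrecreated = cached.fetch()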