caches: make 'cachetocopy' available in scmutil...
Boris Feld
r35784:72fdd99e default
@@ -0,0 +1,21 @@
+# scmutil.py - Mercurial core utility functions
+#
+# Copyright Matt Mackall <mpm@selenic.com> and other
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from . import repoview
+
+def cachetocopy(srcrepo):
+    """return the list of cache file valuable to copy during a clone"""
+    # In local clones we're copying all nodes, not just served
+    # ones. Therefore copy all branch caches over.
+    cachefiles = ['branch2']
+    cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
+    cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
+    cachefiles += ['tags2']
+    cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
+    cachefiles += ['hgtagsfnodes1']
+    return cachefiles
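
For context, the list returned by cachetocopy is consumed by the local-clone path in hg.py (second hunk below), which copies each named file out of the source repository's .hg/cache directory when it exists. A minimal standalone sketch of that pattern follows; it is illustrative only, the helper name copy_repo_caches and the plain os/shutil calls are assumptions here, while Mercurial itself goes through util.copyfile and the repo vfs layer.

    import os
    import shutil

    from mercurial import cacheutil  # module introduced by this changeset

    def copy_repo_caches(srcrepo, destpath):
        """Copy the clone-worthy caches of srcrepo into destpath/cache.

        Illustrative sketch: destpath is the destination '.hg' directory and
        srcrepo is assumed to expose vfs.join() like Mercurial's localrepo.
        """
        dstcachedir = os.path.join(destpath, 'cache')
        for name in cacheutil.cachetocopy(srcrepo):
            src = srcrepo.vfs.join('cache/%s' % name)  # e.g. .hg/cache/branch2
            if os.path.exists(src):
                if not os.path.exists(dstcachedir):
                    os.mkdir(dstcachedir)
                shutil.copyfile(src, os.path.join(dstcachedir, name))
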
@@ -1,1127 +1,1115 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 nullid,
18 nullid,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 bookmarks,
22 bookmarks,
23 bundlerepo,
23 bundlerepo,
24 cacheutil,
24 cmdutil,
25 cmdutil,
25 destutil,
26 destutil,
26 discovery,
27 discovery,
27 error,
28 error,
28 exchange,
29 exchange,
29 extensions,
30 extensions,
30 httppeer,
31 httppeer,
31 localrepo,
32 localrepo,
32 lock,
33 lock,
33 logexchange,
34 logexchange,
34 merge as mergemod,
35 merge as mergemod,
35 node,
36 node,
36 phases,
37 phases,
37 repoview,
38 scmutil,
38 scmutil,
39 sshpeer,
39 sshpeer,
40 statichttprepo,
40 statichttprepo,
41 ui as uimod,
41 ui as uimod,
42 unionrepo,
42 unionrepo,
43 url,
43 url,
44 util,
44 util,
45 verify as verifymod,
45 verify as verifymod,
46 vfs as vfsmod,
46 vfs as vfsmod,
47 )
47 )
48
48
49 release = lock.release
49 release = lock.release
50
50
51 # shared features
51 # shared features
52 sharedbookmarks = 'bookmarks'
52 sharedbookmarks = 'bookmarks'
53
53
54 def _local(path):
54 def _local(path):
55 path = util.expandpath(util.urllocalpath(path))
55 path = util.expandpath(util.urllocalpath(path))
56 return (os.path.isfile(path) and bundlerepo or localrepo)
56 return (os.path.isfile(path) and bundlerepo or localrepo)
57
57
58 def addbranchrevs(lrepo, other, branches, revs):
58 def addbranchrevs(lrepo, other, branches, revs):
59 peer = other.peer() # a courtesy to callers using a localrepo for other
59 peer = other.peer() # a courtesy to callers using a localrepo for other
60 hashbranch, branches = branches
60 hashbranch, branches = branches
61 if not hashbranch and not branches:
61 if not hashbranch and not branches:
62 x = revs or None
62 x = revs or None
63 if util.safehasattr(revs, 'first'):
63 if util.safehasattr(revs, 'first'):
64 y = revs.first()
64 y = revs.first()
65 elif revs:
65 elif revs:
66 y = revs[0]
66 y = revs[0]
67 else:
67 else:
68 y = None
68 y = None
69 return x, y
69 return x, y
70 if revs:
70 if revs:
71 revs = list(revs)
71 revs = list(revs)
72 else:
72 else:
73 revs = []
73 revs = []
74
74
75 if not peer.capable('branchmap'):
75 if not peer.capable('branchmap'):
76 if branches:
76 if branches:
77 raise error.Abort(_("remote branch lookup not supported"))
77 raise error.Abort(_("remote branch lookup not supported"))
78 revs.append(hashbranch)
78 revs.append(hashbranch)
79 return revs, revs[0]
79 return revs, revs[0]
80 branchmap = peer.branchmap()
80 branchmap = peer.branchmap()
81
81
82 def primary(branch):
82 def primary(branch):
83 if branch == '.':
83 if branch == '.':
84 if not lrepo:
84 if not lrepo:
85 raise error.Abort(_("dirstate branch not accessible"))
85 raise error.Abort(_("dirstate branch not accessible"))
86 branch = lrepo.dirstate.branch()
86 branch = lrepo.dirstate.branch()
87 if branch in branchmap:
87 if branch in branchmap:
88 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
88 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
89 return True
89 return True
90 else:
90 else:
91 return False
91 return False
92
92
93 for branch in branches:
93 for branch in branches:
94 if not primary(branch):
94 if not primary(branch):
95 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
95 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
96 if hashbranch:
96 if hashbranch:
97 if not primary(hashbranch):
97 if not primary(hashbranch):
98 revs.append(hashbranch)
98 revs.append(hashbranch)
99 return revs, revs[0]
99 return revs, revs[0]
100
100
101 def parseurl(path, branches=None):
101 def parseurl(path, branches=None):
102 '''parse url#branch, returning (url, (branch, branches))'''
102 '''parse url#branch, returning (url, (branch, branches))'''
103
103
104 u = util.url(path)
104 u = util.url(path)
105 branch = None
105 branch = None
106 if u.fragment:
106 if u.fragment:
107 branch = u.fragment
107 branch = u.fragment
108 u.fragment = None
108 u.fragment = None
109 return bytes(u), (branch, branches or [])
109 return bytes(u), (branch, branches or [])
110
110
111 schemes = {
111 schemes = {
112 'bundle': bundlerepo,
112 'bundle': bundlerepo,
113 'union': unionrepo,
113 'union': unionrepo,
114 'file': _local,
114 'file': _local,
115 'http': httppeer,
115 'http': httppeer,
116 'https': httppeer,
116 'https': httppeer,
117 'ssh': sshpeer,
117 'ssh': sshpeer,
118 'static-http': statichttprepo,
118 'static-http': statichttprepo,
119 }
119 }
120
120
121 def _peerlookup(path):
121 def _peerlookup(path):
122 u = util.url(path)
122 u = util.url(path)
123 scheme = u.scheme or 'file'
123 scheme = u.scheme or 'file'
124 thing = schemes.get(scheme) or schemes['file']
124 thing = schemes.get(scheme) or schemes['file']
125 try:
125 try:
126 return thing(path)
126 return thing(path)
127 except TypeError:
127 except TypeError:
128 # we can't test callable(thing) because 'thing' can be an unloaded
128 # we can't test callable(thing) because 'thing' can be an unloaded
129 # module that implements __call__
129 # module that implements __call__
130 if not util.safehasattr(thing, 'instance'):
130 if not util.safehasattr(thing, 'instance'):
131 raise
131 raise
132 return thing
132 return thing
133
133
134 def islocal(repo):
134 def islocal(repo):
135 '''return true if repo (or path pointing to repo) is local'''
135 '''return true if repo (or path pointing to repo) is local'''
136 if isinstance(repo, bytes):
136 if isinstance(repo, bytes):
137 try:
137 try:
138 return _peerlookup(repo).islocal(repo)
138 return _peerlookup(repo).islocal(repo)
139 except AttributeError:
139 except AttributeError:
140 return False
140 return False
141 return repo.local()
141 return repo.local()
142
142
143 def openpath(ui, path):
143 def openpath(ui, path):
144 '''open path with open if local, url.open if remote'''
144 '''open path with open if local, url.open if remote'''
145 pathurl = util.url(path, parsequery=False, parsefragment=False)
145 pathurl = util.url(path, parsequery=False, parsefragment=False)
146 if pathurl.islocal():
146 if pathurl.islocal():
147 return util.posixfile(pathurl.localpath(), 'rb')
147 return util.posixfile(pathurl.localpath(), 'rb')
148 else:
148 else:
149 return url.open(ui, path)
149 return url.open(ui, path)
150
150
151 # a list of (ui, repo) functions called for wire peer initialization
151 # a list of (ui, repo) functions called for wire peer initialization
152 wirepeersetupfuncs = []
152 wirepeersetupfuncs = []
153
153
154 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
154 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
155 """return a repository object for the specified path"""
155 """return a repository object for the specified path"""
156 obj = _peerlookup(path).instance(ui, path, create)
156 obj = _peerlookup(path).instance(ui, path, create)
157 ui = getattr(obj, "ui", ui)
157 ui = getattr(obj, "ui", ui)
158 for f in presetupfuncs or []:
158 for f in presetupfuncs or []:
159 f(ui, obj)
159 f(ui, obj)
160 for name, module in extensions.extensions(ui):
160 for name, module in extensions.extensions(ui):
161 hook = getattr(module, 'reposetup', None)
161 hook = getattr(module, 'reposetup', None)
162 if hook:
162 if hook:
163 hook(ui, obj)
163 hook(ui, obj)
164 if not obj.local():
164 if not obj.local():
165 for f in wirepeersetupfuncs:
165 for f in wirepeersetupfuncs:
166 f(ui, obj)
166 f(ui, obj)
167 return obj
167 return obj
168
168
169 def repository(ui, path='', create=False, presetupfuncs=None):
169 def repository(ui, path='', create=False, presetupfuncs=None):
170 """return a repository object for the specified path"""
170 """return a repository object for the specified path"""
171 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
171 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
172 repo = peer.local()
172 repo = peer.local()
173 if not repo:
173 if not repo:
174 raise error.Abort(_("repository '%s' is not local") %
174 raise error.Abort(_("repository '%s' is not local") %
175 (path or peer.url()))
175 (path or peer.url()))
176 return repo.filtered('visible')
176 return repo.filtered('visible')
177
177
178 def peer(uiorrepo, opts, path, create=False):
178 def peer(uiorrepo, opts, path, create=False):
179 '''return a repository peer for the specified path'''
179 '''return a repository peer for the specified path'''
180 rui = remoteui(uiorrepo, opts)
180 rui = remoteui(uiorrepo, opts)
181 return _peerorrepo(rui, path, create).peer()
181 return _peerorrepo(rui, path, create).peer()
182
182
183 def defaultdest(source):
183 def defaultdest(source):
184 '''return default destination of clone if none is given
184 '''return default destination of clone if none is given
185
185
186 >>> defaultdest(b'foo')
186 >>> defaultdest(b'foo')
187 'foo'
187 'foo'
188 >>> defaultdest(b'/foo/bar')
188 >>> defaultdest(b'/foo/bar')
189 'bar'
189 'bar'
190 >>> defaultdest(b'/')
190 >>> defaultdest(b'/')
191 ''
191 ''
192 >>> defaultdest(b'')
192 >>> defaultdest(b'')
193 ''
193 ''
194 >>> defaultdest(b'http://example.org/')
194 >>> defaultdest(b'http://example.org/')
195 ''
195 ''
196 >>> defaultdest(b'http://example.org/foo/')
196 >>> defaultdest(b'http://example.org/foo/')
197 'foo'
197 'foo'
198 '''
198 '''
199 path = util.url(source).path
199 path = util.url(source).path
200 if not path:
200 if not path:
201 return ''
201 return ''
202 return os.path.basename(os.path.normpath(path))
202 return os.path.basename(os.path.normpath(path))
203
203
204 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
204 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
205 relative=False):
205 relative=False):
206 '''create a shared repository'''
206 '''create a shared repository'''
207
207
208 if not islocal(source):
208 if not islocal(source):
209 raise error.Abort(_('can only share local repositories'))
209 raise error.Abort(_('can only share local repositories'))
210
210
211 if not dest:
211 if not dest:
212 dest = defaultdest(source)
212 dest = defaultdest(source)
213 else:
213 else:
214 dest = ui.expandpath(dest)
214 dest = ui.expandpath(dest)
215
215
216 if isinstance(source, str):
216 if isinstance(source, str):
217 origsource = ui.expandpath(source)
217 origsource = ui.expandpath(source)
218 source, branches = parseurl(origsource)
218 source, branches = parseurl(origsource)
219 srcrepo = repository(ui, source)
219 srcrepo = repository(ui, source)
220 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
220 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
221 else:
221 else:
222 srcrepo = source.local()
222 srcrepo = source.local()
223 origsource = source = srcrepo.url()
223 origsource = source = srcrepo.url()
224 checkout = None
224 checkout = None
225
225
226 sharedpath = srcrepo.sharedpath # if our source is already sharing
226 sharedpath = srcrepo.sharedpath # if our source is already sharing
227
227
228 destwvfs = vfsmod.vfs(dest, realpath=True)
228 destwvfs = vfsmod.vfs(dest, realpath=True)
229 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
229 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
230
230
231 if destvfs.lexists():
231 if destvfs.lexists():
232 raise error.Abort(_('destination already exists'))
232 raise error.Abort(_('destination already exists'))
233
233
234 if not destwvfs.isdir():
234 if not destwvfs.isdir():
235 destwvfs.mkdir()
235 destwvfs.mkdir()
236 destvfs.makedir()
236 destvfs.makedir()
237
237
238 requirements = ''
238 requirements = ''
239 try:
239 try:
240 requirements = srcrepo.vfs.read('requires')
240 requirements = srcrepo.vfs.read('requires')
241 except IOError as inst:
241 except IOError as inst:
242 if inst.errno != errno.ENOENT:
242 if inst.errno != errno.ENOENT:
243 raise
243 raise
244
244
245 if relative:
245 if relative:
246 try:
246 try:
247 sharedpath = os.path.relpath(sharedpath, destvfs.base)
247 sharedpath = os.path.relpath(sharedpath, destvfs.base)
248 requirements += 'relshared\n'
248 requirements += 'relshared\n'
249 except (IOError, ValueError) as e:
249 except (IOError, ValueError) as e:
250 # ValueError is raised on Windows if the drive letters differ on
250 # ValueError is raised on Windows if the drive letters differ on
251 # each path
251 # each path
252 raise error.Abort(_('cannot calculate relative path'),
252 raise error.Abort(_('cannot calculate relative path'),
253 hint=str(e))
253 hint=str(e))
254 else:
254 else:
255 requirements += 'shared\n'
255 requirements += 'shared\n'
256
256
257 destvfs.write('requires', requirements)
257 destvfs.write('requires', requirements)
258 destvfs.write('sharedpath', sharedpath)
258 destvfs.write('sharedpath', sharedpath)
259
259
260 r = repository(ui, destwvfs.base)
260 r = repository(ui, destwvfs.base)
261 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
261 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
262 _postshareupdate(r, update, checkout=checkout)
262 _postshareupdate(r, update, checkout=checkout)
263 return r
263 return r
264
264
265 def unshare(ui, repo):
265 def unshare(ui, repo):
266 """convert a shared repository to a normal one
266 """convert a shared repository to a normal one
267
267
268 Copy the store data to the repo and remove the sharedpath data.
268 Copy the store data to the repo and remove the sharedpath data.
269 """
269 """
270
270
271 destlock = lock = None
271 destlock = lock = None
272 lock = repo.lock()
272 lock = repo.lock()
273 try:
273 try:
274 # we use locks here because if we race with commit, we
274 # we use locks here because if we race with commit, we
275 # can end up with extra data in the cloned revlogs that's
275 # can end up with extra data in the cloned revlogs that's
276 # not pointed to by changesets, thus causing verify to
276 # not pointed to by changesets, thus causing verify to
277 # fail
277 # fail
278
278
279 destlock = copystore(ui, repo, repo.path)
279 destlock = copystore(ui, repo, repo.path)
280
280
281 sharefile = repo.vfs.join('sharedpath')
281 sharefile = repo.vfs.join('sharedpath')
282 util.rename(sharefile, sharefile + '.old')
282 util.rename(sharefile, sharefile + '.old')
283
283
284 repo.requirements.discard('shared')
284 repo.requirements.discard('shared')
285 repo.requirements.discard('relshared')
285 repo.requirements.discard('relshared')
286 repo._writerequirements()
286 repo._writerequirements()
287 finally:
287 finally:
288 destlock and destlock.release()
288 destlock and destlock.release()
289 lock and lock.release()
289 lock and lock.release()
290
290
291 # update store, spath, svfs and sjoin of repo
291 # update store, spath, svfs and sjoin of repo
292 repo.unfiltered().__init__(repo.baseui, repo.root)
292 repo.unfiltered().__init__(repo.baseui, repo.root)
293
293
294 # TODO: figure out how to access subrepos that exist, but were previously
294 # TODO: figure out how to access subrepos that exist, but were previously
295 # removed from .hgsub
295 # removed from .hgsub
296 c = repo['.']
296 c = repo['.']
297 subs = c.substate
297 subs = c.substate
298 for s in sorted(subs):
298 for s in sorted(subs):
299 c.sub(s).unshare()
299 c.sub(s).unshare()
300
300
301 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
301 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
302 """Called after a new shared repo is created.
302 """Called after a new shared repo is created.
303
303
304 The new repo only has a requirements file and pointer to the source.
304 The new repo only has a requirements file and pointer to the source.
305 This function configures additional shared data.
305 This function configures additional shared data.
306
306
307 Extensions can wrap this function and write additional entries to
307 Extensions can wrap this function and write additional entries to
308 destrepo/.hg/shared to indicate additional pieces of data to be shared.
308 destrepo/.hg/shared to indicate additional pieces of data to be shared.
309 """
309 """
310 default = defaultpath or sourcerepo.ui.config('paths', 'default')
310 default = defaultpath or sourcerepo.ui.config('paths', 'default')
311 if default:
311 if default:
312 template = ('[paths]\n'
312 template = ('[paths]\n'
313 'default = %s\n')
313 'default = %s\n')
314 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
314 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
315
315
316 with destrepo.wlock():
316 with destrepo.wlock():
317 if bookmarks:
317 if bookmarks:
318 destrepo.vfs.write('shared', sharedbookmarks + '\n')
318 destrepo.vfs.write('shared', sharedbookmarks + '\n')
319
319
320 def _postshareupdate(repo, update, checkout=None):
320 def _postshareupdate(repo, update, checkout=None):
321 """Maybe perform a working directory update after a shared repo is created.
321 """Maybe perform a working directory update after a shared repo is created.
322
322
323 ``update`` can be a boolean or a revision to update to.
323 ``update`` can be a boolean or a revision to update to.
324 """
324 """
325 if not update:
325 if not update:
326 return
326 return
327
327
328 repo.ui.status(_("updating working directory\n"))
328 repo.ui.status(_("updating working directory\n"))
329 if update is not True:
329 if update is not True:
330 checkout = update
330 checkout = update
331 for test in (checkout, 'default', 'tip'):
331 for test in (checkout, 'default', 'tip'):
332 if test is None:
332 if test is None:
333 continue
333 continue
334 try:
334 try:
335 uprev = repo.lookup(test)
335 uprev = repo.lookup(test)
336 break
336 break
337 except error.RepoLookupError:
337 except error.RepoLookupError:
338 continue
338 continue
339 _update(repo, uprev)
339 _update(repo, uprev)
340
340
341 def copystore(ui, srcrepo, destpath):
341 def copystore(ui, srcrepo, destpath):
342 '''copy files from store of srcrepo in destpath
342 '''copy files from store of srcrepo in destpath
343
343
344 returns destlock
344 returns destlock
345 '''
345 '''
346 destlock = None
346 destlock = None
347 try:
347 try:
348 hardlink = None
348 hardlink = None
349 num = 0
349 num = 0
350 closetopic = [None]
350 closetopic = [None]
351 def prog(topic, pos):
351 def prog(topic, pos):
352 if pos is None:
352 if pos is None:
353 closetopic[0] = topic
353 closetopic[0] = topic
354 else:
354 else:
355 ui.progress(topic, pos + num)
355 ui.progress(topic, pos + num)
356 srcpublishing = srcrepo.publishing()
356 srcpublishing = srcrepo.publishing()
357 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
357 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
358 dstvfs = vfsmod.vfs(destpath)
358 dstvfs = vfsmod.vfs(destpath)
359 for f in srcrepo.store.copylist():
359 for f in srcrepo.store.copylist():
360 if srcpublishing and f.endswith('phaseroots'):
360 if srcpublishing and f.endswith('phaseroots'):
361 continue
361 continue
362 dstbase = os.path.dirname(f)
362 dstbase = os.path.dirname(f)
363 if dstbase and not dstvfs.exists(dstbase):
363 if dstbase and not dstvfs.exists(dstbase):
364 dstvfs.mkdir(dstbase)
364 dstvfs.mkdir(dstbase)
365 if srcvfs.exists(f):
365 if srcvfs.exists(f):
366 if f.endswith('data'):
366 if f.endswith('data'):
367 # 'dstbase' may be empty (e.g. revlog format 0)
367 # 'dstbase' may be empty (e.g. revlog format 0)
368 lockfile = os.path.join(dstbase, "lock")
368 lockfile = os.path.join(dstbase, "lock")
369 # lock to avoid premature writing to the target
369 # lock to avoid premature writing to the target
370 destlock = lock.lock(dstvfs, lockfile)
370 destlock = lock.lock(dstvfs, lockfile)
371 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
371 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
372 hardlink, progress=prog)
372 hardlink, progress=prog)
373 num += n
373 num += n
374 if hardlink:
374 if hardlink:
375 ui.debug("linked %d files\n" % num)
375 ui.debug("linked %d files\n" % num)
376 if closetopic[0]:
376 if closetopic[0]:
377 ui.progress(closetopic[0], None)
377 ui.progress(closetopic[0], None)
378 else:
378 else:
379 ui.debug("copied %d files\n" % num)
379 ui.debug("copied %d files\n" % num)
380 if closetopic[0]:
380 if closetopic[0]:
381 ui.progress(closetopic[0], None)
381 ui.progress(closetopic[0], None)
382 return destlock
382 return destlock
383 except: # re-raises
383 except: # re-raises
384 release(destlock)
384 release(destlock)
385 raise
385 raise
386
386
387 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
387 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
388 rev=None, update=True, stream=False):
388 rev=None, update=True, stream=False):
389 """Perform a clone using a shared repo.
389 """Perform a clone using a shared repo.
390
390
391 The store for the repository will be located at <sharepath>/.hg. The
391 The store for the repository will be located at <sharepath>/.hg. The
392 specified revisions will be cloned or pulled from "source". A shared repo
392 specified revisions will be cloned or pulled from "source". A shared repo
393 will be created at "dest" and a working copy will be created if "update" is
393 will be created at "dest" and a working copy will be created if "update" is
394 True.
394 True.
395 """
395 """
396 revs = None
396 revs = None
397 if rev:
397 if rev:
398 if not srcpeer.capable('lookup'):
398 if not srcpeer.capable('lookup'):
399 raise error.Abort(_("src repository does not support "
399 raise error.Abort(_("src repository does not support "
400 "revision lookup and so doesn't "
400 "revision lookup and so doesn't "
401 "support clone by revision"))
401 "support clone by revision"))
402 revs = [srcpeer.lookup(r) for r in rev]
402 revs = [srcpeer.lookup(r) for r in rev]
403
403
404 # Obtain a lock before checking for or cloning the pooled repo otherwise
404 # Obtain a lock before checking for or cloning the pooled repo otherwise
405 # 2 clients may race creating or populating it.
405 # 2 clients may race creating or populating it.
406 pooldir = os.path.dirname(sharepath)
406 pooldir = os.path.dirname(sharepath)
407 # lock class requires the directory to exist.
407 # lock class requires the directory to exist.
408 try:
408 try:
409 util.makedir(pooldir, False)
409 util.makedir(pooldir, False)
410 except OSError as e:
410 except OSError as e:
411 if e.errno != errno.EEXIST:
411 if e.errno != errno.EEXIST:
412 raise
412 raise
413
413
414 poolvfs = vfsmod.vfs(pooldir)
414 poolvfs = vfsmod.vfs(pooldir)
415 basename = os.path.basename(sharepath)
415 basename = os.path.basename(sharepath)
416
416
417 with lock.lock(poolvfs, '%s.lock' % basename):
417 with lock.lock(poolvfs, '%s.lock' % basename):
418 if os.path.exists(sharepath):
418 if os.path.exists(sharepath):
419 ui.status(_('(sharing from existing pooled repository %s)\n') %
419 ui.status(_('(sharing from existing pooled repository %s)\n') %
420 basename)
420 basename)
421 else:
421 else:
422 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
422 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
423 # Always use pull mode because hardlinks in share mode don't work
423 # Always use pull mode because hardlinks in share mode don't work
424 # well. Never update because working copies aren't necessary in
424 # well. Never update because working copies aren't necessary in
425 # share mode.
425 # share mode.
426 clone(ui, peeropts, source, dest=sharepath, pull=True,
426 clone(ui, peeropts, source, dest=sharepath, pull=True,
427 rev=rev, update=False, stream=stream)
427 rev=rev, update=False, stream=stream)
428
428
429 # Resolve the value to put in [paths] section for the source.
429 # Resolve the value to put in [paths] section for the source.
430 if islocal(source):
430 if islocal(source):
431 defaultpath = os.path.abspath(util.urllocalpath(source))
431 defaultpath = os.path.abspath(util.urllocalpath(source))
432 else:
432 else:
433 defaultpath = source
433 defaultpath = source
434
434
435 sharerepo = repository(ui, path=sharepath)
435 sharerepo = repository(ui, path=sharepath)
436 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
436 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
437 defaultpath=defaultpath)
437 defaultpath=defaultpath)
438
438
439 # We need to perform a pull against the dest repo to fetch bookmarks
439 # We need to perform a pull against the dest repo to fetch bookmarks
440 # and other non-store data that isn't shared by default. In the case of
440 # and other non-store data that isn't shared by default. In the case of
441 # non-existing shared repo, this means we pull from the remote twice. This
441 # non-existing shared repo, this means we pull from the remote twice. This
442 # is a bit weird. But at the time it was implemented, there wasn't an easy
442 # is a bit weird. But at the time it was implemented, there wasn't an easy
443 # way to pull just non-changegroup data.
443 # way to pull just non-changegroup data.
444 destrepo = repository(ui, path=dest)
444 destrepo = repository(ui, path=dest)
445 exchange.pull(destrepo, srcpeer, heads=revs)
445 exchange.pull(destrepo, srcpeer, heads=revs)
446
446
447 _postshareupdate(destrepo, update)
447 _postshareupdate(destrepo, update)
448
448
449 return srcpeer, peer(ui, peeropts, dest)
449 return srcpeer, peer(ui, peeropts, dest)
450
450
451 # Recomputing branch cache might be slow on big repos,
451 # Recomputing branch cache might be slow on big repos,
452 # so just copy it
452 # so just copy it
453 def _copycache(srcrepo, dstcachedir, fname):
453 def _copycache(srcrepo, dstcachedir, fname):
454 """copy a cache from srcrepo to destcachedir (if it exists)"""
454 """copy a cache from srcrepo to destcachedir (if it exists)"""
455 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
455 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
456 dstbranchcache = os.path.join(dstcachedir, fname)
456 dstbranchcache = os.path.join(dstcachedir, fname)
457 if os.path.exists(srcbranchcache):
457 if os.path.exists(srcbranchcache):
458 if not os.path.exists(dstcachedir):
458 if not os.path.exists(dstcachedir):
459 os.mkdir(dstcachedir)
459 os.mkdir(dstcachedir)
460 util.copyfile(srcbranchcache, dstbranchcache)
460 util.copyfile(srcbranchcache, dstbranchcache)
461
461
462 def _cachetocopy(srcrepo):
463 """return the list of cache file valuable to copy during a clone"""
464 # In local clones we're copying all nodes, not just served
465 # ones. Therefore copy all branch caches over.
466 cachefiles = ['branch2']
467 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
468 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
469 cachefiles += ['tags2']
470 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
471 cachefiles += ['hgtagsfnodes1']
472 return cachefiles
473
474 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
462 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
475 update=True, stream=False, branch=None, shareopts=None):
463 update=True, stream=False, branch=None, shareopts=None):
476 """Make a copy of an existing repository.
464 """Make a copy of an existing repository.
477
465
478 Create a copy of an existing repository in a new directory. The
466 Create a copy of an existing repository in a new directory. The
479 source and destination are URLs, as passed to the repository
467 source and destination are URLs, as passed to the repository
480 function. Returns a pair of repository peers, the source and
468 function. Returns a pair of repository peers, the source and
481 newly created destination.
469 newly created destination.
482
470
483 The location of the source is added to the new repository's
471 The location of the source is added to the new repository's
484 .hg/hgrc file, as the default to be used for future pulls and
472 .hg/hgrc file, as the default to be used for future pulls and
485 pushes.
473 pushes.
486
474
487 If an exception is raised, the partly cloned/updated destination
475 If an exception is raised, the partly cloned/updated destination
488 repository will be deleted.
476 repository will be deleted.
489
477
490 Arguments:
478 Arguments:
491
479
492 source: repository object or URL
480 source: repository object or URL
493
481
494 dest: URL of destination repository to create (defaults to base
482 dest: URL of destination repository to create (defaults to base
495 name of source repository)
483 name of source repository)
496
484
497 pull: always pull from source repository, even in local case or if the
485 pull: always pull from source repository, even in local case or if the
498 server prefers streaming
486 server prefers streaming
499
487
500 stream: stream raw data uncompressed from repository (fast over
488 stream: stream raw data uncompressed from repository (fast over
501 LAN, slow over WAN)
489 LAN, slow over WAN)
502
490
503 rev: revision to clone up to (implies pull=True)
491 rev: revision to clone up to (implies pull=True)
504
492
505 update: update working directory after clone completes, if
493 update: update working directory after clone completes, if
506 destination is local repository (True means update to default rev,
494 destination is local repository (True means update to default rev,
507 anything else is treated as a revision)
495 anything else is treated as a revision)
508
496
509 branch: branches to clone
497 branch: branches to clone
510
498
511 shareopts: dict of options to control auto sharing behavior. The "pool" key
499 shareopts: dict of options to control auto sharing behavior. The "pool" key
512 activates auto sharing mode and defines the directory for stores. The
500 activates auto sharing mode and defines the directory for stores. The
513 "mode" key determines how to construct the directory name of the shared
501 "mode" key determines how to construct the directory name of the shared
514 repository. "identity" means the name is derived from the node of the first
502 repository. "identity" means the name is derived from the node of the first
515 changeset in the repository. "remote" means the name is derived from the
503 changeset in the repository. "remote" means the name is derived from the
516 remote's path/URL. Defaults to "identity."
504 remote's path/URL. Defaults to "identity."
517 """
505 """
518
506
519 if isinstance(source, bytes):
507 if isinstance(source, bytes):
520 origsource = ui.expandpath(source)
508 origsource = ui.expandpath(source)
521 source, branch = parseurl(origsource, branch)
509 source, branch = parseurl(origsource, branch)
522 srcpeer = peer(ui, peeropts, source)
510 srcpeer = peer(ui, peeropts, source)
523 else:
511 else:
524 srcpeer = source.peer() # in case we were called with a localrepo
512 srcpeer = source.peer() # in case we were called with a localrepo
525 branch = (None, branch or [])
513 branch = (None, branch or [])
526 origsource = source = srcpeer.url()
514 origsource = source = srcpeer.url()
527 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
515 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
528
516
529 if dest is None:
517 if dest is None:
530 dest = defaultdest(source)
518 dest = defaultdest(source)
531 if dest:
519 if dest:
532 ui.status(_("destination directory: %s\n") % dest)
520 ui.status(_("destination directory: %s\n") % dest)
533 else:
521 else:
534 dest = ui.expandpath(dest)
522 dest = ui.expandpath(dest)
535
523
536 dest = util.urllocalpath(dest)
524 dest = util.urllocalpath(dest)
537 source = util.urllocalpath(source)
525 source = util.urllocalpath(source)
538
526
539 if not dest:
527 if not dest:
540 raise error.Abort(_("empty destination path is not valid"))
528 raise error.Abort(_("empty destination path is not valid"))
541
529
542 destvfs = vfsmod.vfs(dest, expandpath=True)
530 destvfs = vfsmod.vfs(dest, expandpath=True)
543 if destvfs.lexists():
531 if destvfs.lexists():
544 if not destvfs.isdir():
532 if not destvfs.isdir():
545 raise error.Abort(_("destination '%s' already exists") % dest)
533 raise error.Abort(_("destination '%s' already exists") % dest)
546 elif destvfs.listdir():
534 elif destvfs.listdir():
547 raise error.Abort(_("destination '%s' is not empty") % dest)
535 raise error.Abort(_("destination '%s' is not empty") % dest)
548
536
549 shareopts = shareopts or {}
537 shareopts = shareopts or {}
550 sharepool = shareopts.get('pool')
538 sharepool = shareopts.get('pool')
551 sharenamemode = shareopts.get('mode')
539 sharenamemode = shareopts.get('mode')
552 if sharepool and islocal(dest):
540 if sharepool and islocal(dest):
553 sharepath = None
541 sharepath = None
554 if sharenamemode == 'identity':
542 if sharenamemode == 'identity':
555 # Resolve the name from the initial changeset in the remote
543 # Resolve the name from the initial changeset in the remote
556 # repository. This returns nullid when the remote is empty. It
544 # repository. This returns nullid when the remote is empty. It
557 # raises RepoLookupError if revision 0 is filtered or otherwise
545 # raises RepoLookupError if revision 0 is filtered or otherwise
558 # not available. If we fail to resolve, sharing is not enabled.
546 # not available. If we fail to resolve, sharing is not enabled.
559 try:
547 try:
560 rootnode = srcpeer.lookup('0')
548 rootnode = srcpeer.lookup('0')
561 if rootnode != node.nullid:
549 if rootnode != node.nullid:
562 sharepath = os.path.join(sharepool, node.hex(rootnode))
550 sharepath = os.path.join(sharepool, node.hex(rootnode))
563 else:
551 else:
564 ui.status(_('(not using pooled storage: '
552 ui.status(_('(not using pooled storage: '
565 'remote appears to be empty)\n'))
553 'remote appears to be empty)\n'))
566 except error.RepoLookupError:
554 except error.RepoLookupError:
567 ui.status(_('(not using pooled storage: '
555 ui.status(_('(not using pooled storage: '
568 'unable to resolve identity of remote)\n'))
556 'unable to resolve identity of remote)\n'))
569 elif sharenamemode == 'remote':
557 elif sharenamemode == 'remote':
570 sharepath = os.path.join(
558 sharepath = os.path.join(
571 sharepool, node.hex(hashlib.sha1(source).digest()))
559 sharepool, node.hex(hashlib.sha1(source).digest()))
572 else:
560 else:
573 raise error.Abort(_('unknown share naming mode: %s') %
561 raise error.Abort(_('unknown share naming mode: %s') %
574 sharenamemode)
562 sharenamemode)
575
563
576 if sharepath:
564 if sharepath:
577 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
565 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
578 dest, pull=pull, rev=rev, update=update,
566 dest, pull=pull, rev=rev, update=update,
579 stream=stream)
567 stream=stream)
580
568
581 srclock = destlock = cleandir = None
569 srclock = destlock = cleandir = None
582 srcrepo = srcpeer.local()
570 srcrepo = srcpeer.local()
583 try:
571 try:
584 abspath = origsource
572 abspath = origsource
585 if islocal(origsource):
573 if islocal(origsource):
586 abspath = os.path.abspath(util.urllocalpath(origsource))
574 abspath = os.path.abspath(util.urllocalpath(origsource))
587
575
588 if islocal(dest):
576 if islocal(dest):
589 cleandir = dest
577 cleandir = dest
590
578
591 copy = False
579 copy = False
592 if (srcrepo and srcrepo.cancopy() and islocal(dest)
580 if (srcrepo and srcrepo.cancopy() and islocal(dest)
593 and not phases.hassecret(srcrepo)):
581 and not phases.hassecret(srcrepo)):
594 copy = not pull and not rev
582 copy = not pull and not rev
595
583
596 if copy:
584 if copy:
597 try:
585 try:
598 # we use a lock here because if we race with commit, we
586 # we use a lock here because if we race with commit, we
599 # can end up with extra data in the cloned revlogs that's
587 # can end up with extra data in the cloned revlogs that's
600 # not pointed to by changesets, thus causing verify to
588 # not pointed to by changesets, thus causing verify to
601 # fail
589 # fail
602 srclock = srcrepo.lock(wait=False)
590 srclock = srcrepo.lock(wait=False)
603 except error.LockError:
591 except error.LockError:
604 copy = False
592 copy = False
605
593
606 if copy:
594 if copy:
607 srcrepo.hook('preoutgoing', throw=True, source='clone')
595 srcrepo.hook('preoutgoing', throw=True, source='clone')
608 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
596 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
609 if not os.path.exists(dest):
597 if not os.path.exists(dest):
610 os.mkdir(dest)
598 os.mkdir(dest)
611 else:
599 else:
612 # only clean up directories we create ourselves
600 # only clean up directories we create ourselves
613 cleandir = hgdir
601 cleandir = hgdir
614 try:
602 try:
615 destpath = hgdir
603 destpath = hgdir
616 util.makedir(destpath, notindexed=True)
604 util.makedir(destpath, notindexed=True)
617 except OSError as inst:
605 except OSError as inst:
618 if inst.errno == errno.EEXIST:
606 if inst.errno == errno.EEXIST:
619 cleandir = None
607 cleandir = None
620 raise error.Abort(_("destination '%s' already exists")
608 raise error.Abort(_("destination '%s' already exists")
621 % dest)
609 % dest)
622 raise
610 raise
623
611
624 destlock = copystore(ui, srcrepo, destpath)
612 destlock = copystore(ui, srcrepo, destpath)
625 # copy bookmarks over
613 # copy bookmarks over
626 srcbookmarks = srcrepo.vfs.join('bookmarks')
614 srcbookmarks = srcrepo.vfs.join('bookmarks')
627 dstbookmarks = os.path.join(destpath, 'bookmarks')
615 dstbookmarks = os.path.join(destpath, 'bookmarks')
628 if os.path.exists(srcbookmarks):
616 if os.path.exists(srcbookmarks):
629 util.copyfile(srcbookmarks, dstbookmarks)
617 util.copyfile(srcbookmarks, dstbookmarks)
630
618
631 dstcachedir = os.path.join(destpath, 'cache')
619 dstcachedir = os.path.join(destpath, 'cache')
632 for cache in _cachetocopy(srcrepo):
620 for cache in cacheutil.cachetocopy(srcrepo):
633 _copycache(srcrepo, dstcachedir, cache)
621 _copycache(srcrepo, dstcachedir, cache)
634
622
635 # we need to re-init the repo after manually copying the data
623 # we need to re-init the repo after manually copying the data
636 # into it
624 # into it
637 destpeer = peer(srcrepo, peeropts, dest)
625 destpeer = peer(srcrepo, peeropts, dest)
638 srcrepo.hook('outgoing', source='clone',
626 srcrepo.hook('outgoing', source='clone',
639 node=node.hex(node.nullid))
627 node=node.hex(node.nullid))
640 else:
628 else:
641 try:
629 try:
642 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
630 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
643 # only pass ui when no srcrepo
631 # only pass ui when no srcrepo
644 except OSError as inst:
632 except OSError as inst:
645 if inst.errno == errno.EEXIST:
633 if inst.errno == errno.EEXIST:
646 cleandir = None
634 cleandir = None
647 raise error.Abort(_("destination '%s' already exists")
635 raise error.Abort(_("destination '%s' already exists")
648 % dest)
636 % dest)
649 raise
637 raise
650
638
651 revs = None
639 revs = None
652 if rev:
640 if rev:
653 if not srcpeer.capable('lookup'):
641 if not srcpeer.capable('lookup'):
654 raise error.Abort(_("src repository does not support "
642 raise error.Abort(_("src repository does not support "
655 "revision lookup and so doesn't "
643 "revision lookup and so doesn't "
656 "support clone by revision"))
644 "support clone by revision"))
657 revs = [srcpeer.lookup(r) for r in rev]
645 revs = [srcpeer.lookup(r) for r in rev]
658 checkout = revs[0]
646 checkout = revs[0]
659 local = destpeer.local()
647 local = destpeer.local()
660 if local:
648 if local:
661 u = util.url(abspath)
649 u = util.url(abspath)
662 defaulturl = bytes(u)
650 defaulturl = bytes(u)
663 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
651 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
664 if not stream:
652 if not stream:
665 if pull:
653 if pull:
666 stream = False
654 stream = False
667 else:
655 else:
668 stream = None
656 stream = None
669 # internal config: ui.quietbookmarkmove
657 # internal config: ui.quietbookmarkmove
670 overrides = {('ui', 'quietbookmarkmove'): True}
658 overrides = {('ui', 'quietbookmarkmove'): True}
671 with local.ui.configoverride(overrides, 'clone'):
659 with local.ui.configoverride(overrides, 'clone'):
672 exchange.pull(local, srcpeer, revs,
660 exchange.pull(local, srcpeer, revs,
673 streamclonerequested=stream)
661 streamclonerequested=stream)
674 elif srcrepo:
662 elif srcrepo:
675 exchange.push(srcrepo, destpeer, revs=revs,
663 exchange.push(srcrepo, destpeer, revs=revs,
676 bookmarks=srcrepo._bookmarks.keys())
664 bookmarks=srcrepo._bookmarks.keys())
677 else:
665 else:
678 raise error.Abort(_("clone from remote to remote not supported")
666 raise error.Abort(_("clone from remote to remote not supported")
679 )
667 )
680
668
681 cleandir = None
669 cleandir = None
682
670
683 destrepo = destpeer.local()
671 destrepo = destpeer.local()
684 if destrepo:
672 if destrepo:
685 template = uimod.samplehgrcs['cloned']
673 template = uimod.samplehgrcs['cloned']
686 u = util.url(abspath)
674 u = util.url(abspath)
687 u.passwd = None
675 u.passwd = None
688 defaulturl = bytes(u)
676 defaulturl = bytes(u)
689 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
677 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
690 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
678 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
691
679
692 if ui.configbool('experimental', 'remotenames'):
680 if ui.configbool('experimental', 'remotenames'):
693 logexchange.pullremotenames(destrepo, srcpeer)
681 logexchange.pullremotenames(destrepo, srcpeer)
694
682
695 if update:
683 if update:
696 if update is not True:
684 if update is not True:
697 checkout = srcpeer.lookup(update)
685 checkout = srcpeer.lookup(update)
698 uprev = None
686 uprev = None
699 status = None
687 status = None
700 if checkout is not None:
688 if checkout is not None:
701 try:
689 try:
702 uprev = destrepo.lookup(checkout)
690 uprev = destrepo.lookup(checkout)
703 except error.RepoLookupError:
691 except error.RepoLookupError:
704 if update is not True:
692 if update is not True:
705 try:
693 try:
706 uprev = destrepo.lookup(update)
694 uprev = destrepo.lookup(update)
707 except error.RepoLookupError:
695 except error.RepoLookupError:
708 pass
696 pass
709 if uprev is None:
697 if uprev is None:
710 try:
698 try:
711 uprev = destrepo._bookmarks['@']
699 uprev = destrepo._bookmarks['@']
712 update = '@'
700 update = '@'
713 bn = destrepo[uprev].branch()
701 bn = destrepo[uprev].branch()
714 if bn == 'default':
702 if bn == 'default':
715 status = _("updating to bookmark @\n")
703 status = _("updating to bookmark @\n")
716 else:
704 else:
717 status = (_("updating to bookmark @ on branch %s\n")
705 status = (_("updating to bookmark @ on branch %s\n")
718 % bn)
706 % bn)
719 except KeyError:
707 except KeyError:
720 try:
708 try:
721 uprev = destrepo.branchtip('default')
709 uprev = destrepo.branchtip('default')
722 except error.RepoLookupError:
710 except error.RepoLookupError:
723 uprev = destrepo.lookup('tip')
711 uprev = destrepo.lookup('tip')
724 if not status:
712 if not status:
725 bn = destrepo[uprev].branch()
713 bn = destrepo[uprev].branch()
726 status = _("updating to branch %s\n") % bn
714 status = _("updating to branch %s\n") % bn
727 destrepo.ui.status(status)
715 destrepo.ui.status(status)
728 _update(destrepo, uprev)
716 _update(destrepo, uprev)
729 if update in destrepo._bookmarks:
717 if update in destrepo._bookmarks:
730 bookmarks.activate(destrepo, update)
718 bookmarks.activate(destrepo, update)
731 finally:
719 finally:
732 release(srclock, destlock)
720 release(srclock, destlock)
733 if cleandir is not None:
721 if cleandir is not None:
734 shutil.rmtree(cleandir, True)
722 shutil.rmtree(cleandir, True)
735 if srcpeer is not None:
723 if srcpeer is not None:
736 srcpeer.close()
724 srcpeer.close()
737 return srcpeer, destpeer
725 return srcpeer, destpeer
738
726
739 def _showstats(repo, stats, quietempty=False):
727 def _showstats(repo, stats, quietempty=False):
740 if quietempty and not any(stats):
728 if quietempty and not any(stats):
741 return
729 return
742 repo.ui.status(_("%d files updated, %d files merged, "
730 repo.ui.status(_("%d files updated, %d files merged, "
743 "%d files removed, %d files unresolved\n") % stats)
731 "%d files removed, %d files unresolved\n") % stats)
744
732
745 def updaterepo(repo, node, overwrite, updatecheck=None):
733 def updaterepo(repo, node, overwrite, updatecheck=None):
746 """Update the working directory to node.
734 """Update the working directory to node.
747
735
748 When overwrite is set, changes are clobbered, merged else
736 When overwrite is set, changes are clobbered, merged else
749
737
750 returns stats (see pydoc mercurial.merge.applyupdates)"""
738 returns stats (see pydoc mercurial.merge.applyupdates)"""
751 return mergemod.update(repo, node, False, overwrite,
739 return mergemod.update(repo, node, False, overwrite,
752 labels=['working copy', 'destination'],
740 labels=['working copy', 'destination'],
753 updatecheck=updatecheck)
741 updatecheck=updatecheck)
754
742
755 def update(repo, node, quietempty=False, updatecheck=None):
743 def update(repo, node, quietempty=False, updatecheck=None):
756 """update the working directory to node"""
744 """update the working directory to node"""
757 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
745 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
758 _showstats(repo, stats, quietempty)
746 _showstats(repo, stats, quietempty)
759 if stats[3]:
747 if stats[3]:
760 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
748 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
761 return stats[3] > 0
749 return stats[3] > 0
762
750
763 # naming conflict in clone()
751 # naming conflict in clone()
764 _update = update
752 _update = update
765
753
766 def clean(repo, node, show_stats=True, quietempty=False):
754 def clean(repo, node, show_stats=True, quietempty=False):
767 """forcibly switch the working directory to node, clobbering changes"""
755 """forcibly switch the working directory to node, clobbering changes"""
768 stats = updaterepo(repo, node, True)
756 stats = updaterepo(repo, node, True)
769 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
757 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
770 if show_stats:
758 if show_stats:
771 _showstats(repo, stats, quietempty)
759 _showstats(repo, stats, quietempty)
772 return stats[3] > 0
760 return stats[3] > 0
773
761
774 # naming conflict in updatetotally()
762 # naming conflict in updatetotally()
775 _clean = clean
763 _clean = clean
776
764
777 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
765 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
778 """Update the working directory with extra care for non-file components
766 """Update the working directory with extra care for non-file components
779
767
780 This takes care of non-file components below:
768 This takes care of non-file components below:
781
769
782 :bookmark: might be advanced or (in)activated
770 :bookmark: might be advanced or (in)activated
783
771
784 This takes arguments below:
772 This takes arguments below:
785
773
786 :checkout: to which revision the working directory is updated
774 :checkout: to which revision the working directory is updated
787 :brev: a name, which might be a bookmark to be activated after updating
775 :brev: a name, which might be a bookmark to be activated after updating
788 :clean: whether changes in the working directory can be discarded
776 :clean: whether changes in the working directory can be discarded
789 :updatecheck: how to deal with a dirty working directory
777 :updatecheck: how to deal with a dirty working directory
790
778
791 Valid values for updatecheck are (None => linear):
779 Valid values for updatecheck are (None => linear):
792
780
793 * abort: abort if the working directory is dirty
781 * abort: abort if the working directory is dirty
794 * none: don't check (merge working directory changes into destination)
782 * none: don't check (merge working directory changes into destination)
795 * linear: check that update is linear before merging working directory
783 * linear: check that update is linear before merging working directory
796 changes into destination
784 changes into destination
797 * noconflict: check that the update does not result in file merges
785 * noconflict: check that the update does not result in file merges
798
786
799 This returns whether conflict is detected at updating or not.
787 This returns whether conflict is detected at updating or not.
800 """
788 """
801 if updatecheck is None:
789 if updatecheck is None:
802 updatecheck = ui.config('commands', 'update.check')
790 updatecheck = ui.config('commands', 'update.check')
803 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
791 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
804 # If not configured, or invalid value configured
792 # If not configured, or invalid value configured
805 updatecheck = 'linear'
793 updatecheck = 'linear'
806 with repo.wlock():
794 with repo.wlock():
807 movemarkfrom = None
795 movemarkfrom = None
808 warndest = False
796 warndest = False
809 if checkout is None:
797 if checkout is None:
810 updata = destutil.destupdate(repo, clean=clean)
798 updata = destutil.destupdate(repo, clean=clean)
811 checkout, movemarkfrom, brev = updata
799 checkout, movemarkfrom, brev = updata
812 warndest = True
800 warndest = True
813
801
814 if clean:
802 if clean:
815 ret = _clean(repo, checkout)
803 ret = _clean(repo, checkout)
816 else:
804 else:
817 if updatecheck == 'abort':
805 if updatecheck == 'abort':
818 cmdutil.bailifchanged(repo, merge=False)
806 cmdutil.bailifchanged(repo, merge=False)
819 updatecheck = 'none'
807 updatecheck = 'none'
820 ret = _update(repo, checkout, updatecheck=updatecheck)
808 ret = _update(repo, checkout, updatecheck=updatecheck)
821
809
822 if not ret and movemarkfrom:
810 if not ret and movemarkfrom:
823 if movemarkfrom == repo['.'].node():
811 if movemarkfrom == repo['.'].node():
824 pass # no-op update
812 pass # no-op update
825 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
813 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
826 b = ui.label(repo._activebookmark, 'bookmarks.active')
814 b = ui.label(repo._activebookmark, 'bookmarks.active')
827 ui.status(_("updating bookmark %s\n") % b)
815 ui.status(_("updating bookmark %s\n") % b)
828 else:
816 else:
829 # this can happen with a non-linear update
817 # this can happen with a non-linear update
830 b = ui.label(repo._activebookmark, 'bookmarks')
818 b = ui.label(repo._activebookmark, 'bookmarks')
831 ui.status(_("(leaving bookmark %s)\n") % b)
819 ui.status(_("(leaving bookmark %s)\n") % b)
832 bookmarks.deactivate(repo)
820 bookmarks.deactivate(repo)
833 elif brev in repo._bookmarks:
821 elif brev in repo._bookmarks:
834 if brev != repo._activebookmark:
822 if brev != repo._activebookmark:
835 b = ui.label(brev, 'bookmarks.active')
823 b = ui.label(brev, 'bookmarks.active')
836 ui.status(_("(activating bookmark %s)\n") % b)
824 ui.status(_("(activating bookmark %s)\n") % b)
837 bookmarks.activate(repo, brev)
825 bookmarks.activate(repo, brev)
838 elif brev:
826 elif brev:
839 if repo._activebookmark:
827 if repo._activebookmark:
840 b = ui.label(repo._activebookmark, 'bookmarks')
828 b = ui.label(repo._activebookmark, 'bookmarks')
841 ui.status(_("(leaving bookmark %s)\n") % b)
829 ui.status(_("(leaving bookmark %s)\n") % b)
842 bookmarks.deactivate(repo)
830 bookmarks.deactivate(repo)
843
831
844 if warndest:
832 if warndest:
845 destutil.statusotherdests(ui, repo)
833 destutil.statusotherdests(ui, repo)
846
834
847 return ret
835 return ret
848
836
849 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
837 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
850 abort=False):
838 abort=False):
851 """Branch merge with node, resolving changes. Return true if any
839 """Branch merge with node, resolving changes. Return true if any
852 unresolved conflicts."""
840 unresolved conflicts."""
853 if not abort:
841 if not abort:
854 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
842 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
855 labels=labels)
843 labels=labels)
856 else:
844 else:
857 ms = mergemod.mergestate.read(repo)
845 ms = mergemod.mergestate.read(repo)
858 if ms.active():
846 if ms.active():
859 # there were conflicts
847 # there were conflicts
860 node = ms.localctx.hex()
848 node = ms.localctx.hex()
861 else:
849 else:
862 # there were no conficts, mergestate was not stored
850 # there were no conficts, mergestate was not stored
863 node = repo['.'].hex()
851 node = repo['.'].hex()
864
852
865 repo.ui.status(_("aborting the merge, updating back to"
853 repo.ui.status(_("aborting the merge, updating back to"
866 " %s\n") % node[:12])
854 " %s\n") % node[:12])
867 stats = mergemod.update(repo, node, branchmerge=False, force=True,
855 stats = mergemod.update(repo, node, branchmerge=False, force=True,
868 labels=labels)
856 labels=labels)
869
857
870 _showstats(repo, stats)
858 _showstats(repo, stats)
871 if stats[3]:
859 if stats[3]:
872 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
860 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
873 "or 'hg merge --abort' to abandon\n"))
861 "or 'hg merge --abort' to abandon\n"))
874 elif remind and not abort:
862 elif remind and not abort:
875 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
863 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
876 return stats[3] > 0
864 return stats[3] > 0
877
865
def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

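# --- Illustrative sketch, not part of the original module ------------------
# remoteui() above builds a ui suitable for talking to another repository;
# the 'ssh' value here is just a placeholder option dict.
def _exampleremoteui(repo):
    rui = remoteui(repo, {'ssh': 'ssh -C'})
    # rui starts from repo.baseui (repository-local config dropped) and then
    # gets ui.ssh plus the auth/hostsecurity/http_proxy sections copied back.
    return rui
# ----------------------------------------------------------------------------
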
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
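# --- Illustrative sketch, not part of the original module ------------------
# A long-running process could keep a cachedlocalrepo around and only pay the
# reload cost when one of the files listed in 'foi' changes on disk; 'ui' and
# 'path' are assumed to be supplied by the server code.
def _examplefetch(ui, path):
    cached = cachedlocalrepo(repository(ui, path))
    repo, fresh = cached.fetch()   # recreates the repo only if state changed
    if fresh:
        ui.debug('repository changed on disk; using a new instance\n')
    return repo
# ----------------------------------------------------------------------------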