##// END OF EJS Templates
share: stop using 'islocal' with repo instance...
marmoute -
r50581:229e0ed8 default
parent child Browse files
Show More
@@ -1,1611 +1,1616 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
68 def addbranchrevs(lrepo, other, branches, revs):
68 def addbranchrevs(lrepo, other, branches, revs):
69 peer = other.peer() # a courtesy to callers using a localrepo for other
69 peer = other.peer() # a courtesy to callers using a localrepo for other
70 hashbranch, branches = branches
70 hashbranch, branches = branches
71 if not hashbranch and not branches:
71 if not hashbranch and not branches:
72 x = revs or None
72 x = revs or None
73 if revs:
73 if revs:
74 y = revs[0]
74 y = revs[0]
75 else:
75 else:
76 y = None
76 y = None
77 return x, y
77 return x, y
78 if revs:
78 if revs:
79 revs = list(revs)
79 revs = list(revs)
80 else:
80 else:
81 revs = []
81 revs = []
82
82
83 if not peer.capable(b'branchmap'):
83 if not peer.capable(b'branchmap'):
84 if branches:
84 if branches:
85 raise error.Abort(_(b"remote branch lookup not supported"))
85 raise error.Abort(_(b"remote branch lookup not supported"))
86 revs.append(hashbranch)
86 revs.append(hashbranch)
87 return revs, revs[0]
87 return revs, revs[0]
88
88
89 with peer.commandexecutor() as e:
89 with peer.commandexecutor() as e:
90 branchmap = e.callcommand(b'branchmap', {}).result()
90 branchmap = e.callcommand(b'branchmap', {}).result()
91
91
92 def primary(branch):
92 def primary(branch):
93 if branch == b'.':
93 if branch == b'.':
94 if not lrepo:
94 if not lrepo:
95 raise error.Abort(_(b"dirstate branch not accessible"))
95 raise error.Abort(_(b"dirstate branch not accessible"))
96 branch = lrepo.dirstate.branch()
96 branch = lrepo.dirstate.branch()
97 if branch in branchmap:
97 if branch in branchmap:
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
99 return True
99 return True
100 else:
100 else:
101 return False
101 return False
102
102
103 for branch in branches:
103 for branch in branches:
104 if not primary(branch):
104 if not primary(branch):
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
106 if hashbranch:
106 if hashbranch:
107 if not primary(hashbranch):
107 if not primary(hashbranch):
108 revs.append(hashbranch)
108 revs.append(hashbranch)
109 return revs, revs[0]
109 return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
128 class LocalFactory:
128 class LocalFactory:
129 """thin wrapper to dispatch between localrepo and bundle repo"""
129 """thin wrapper to dispatch between localrepo and bundle repo"""
130
130
131 @staticmethod
131 @staticmethod
132 def islocal(path: bytes) -> bool:
132 def islocal(path: bytes) -> bool:
133 path = util.expandpath(urlutil.urllocalpath(path))
133 path = util.expandpath(urlutil.urllocalpath(path))
134 return not _isfile(path)
134 return not _isfile(path)
135
135
136 @staticmethod
136 @staticmethod
137 def instance(ui, path, *args, **kwargs):
137 def instance(ui, path, *args, **kwargs):
138 path = util.expandpath(urlutil.urllocalpath(path))
138 path = util.expandpath(urlutil.urllocalpath(path))
139 if _isfile(path):
139 if _isfile(path):
140 cls = bundlerepo
140 cls = bundlerepo
141 else:
141 else:
142 cls = localrepo
142 cls = localrepo
143 return cls.instance(ui, path, *args, **kwargs)
143 return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
146 schemes = {
146 schemes = {
147 b'bundle': bundlerepo,
147 b'bundle': bundlerepo,
148 b'union': unionrepo,
148 b'union': unionrepo,
149 b'file': LocalFactory,
149 b'file': LocalFactory,
150 b'http': httppeer,
150 b'http': httppeer,
151 b'https': httppeer,
151 b'https': httppeer,
152 b'ssh': sshpeer,
152 b'ssh': sshpeer,
153 b'static-http': statichttprepo,
153 b'static-http': statichttprepo,
154 }
154 }
155
155
156
156
157 def _peerlookup(path):
157 def _peerlookup(path):
158 u = urlutil.url(path)
158 u = urlutil.url(path)
159 scheme = u.scheme or b'file'
159 scheme = u.scheme or b'file'
160 thing = schemes.get(scheme) or schemes[b'file']
160 thing = schemes.get(scheme) or schemes[b'file']
161 return thing
161 return thing
162
162
163
163
164 def islocal(repo):
164 def islocal(repo):
165 '''return true if repo (or path pointing to repo) is local'''
165 '''return true if repo (or path pointing to repo) is local'''
166 if isinstance(repo, bytes):
166 if isinstance(repo, bytes):
167 try:
167 try:
168 return _peerlookup(repo).islocal(repo)
168 return _peerlookup(repo).islocal(repo)
169 except AttributeError:
169 except AttributeError:
170 return False
170 return False
171 return repo.local()
171 return repo.local()
172
172
173
173
174 def openpath(ui, path, sendaccept=True):
174 def openpath(ui, path, sendaccept=True):
175 '''open path with open if local, url.open if remote'''
175 '''open path with open if local, url.open if remote'''
176 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
176 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
177 if pathurl.islocal():
177 if pathurl.islocal():
178 return util.posixfile(pathurl.localpath(), b'rb')
178 return util.posixfile(pathurl.localpath(), b'rb')
179 else:
179 else:
180 return url.open(ui, path, sendaccept=sendaccept)
180 return url.open(ui, path, sendaccept=sendaccept)
181
181
182
182
183 # a list of (ui, repo) functions called for wire peer initialization
183 # a list of (ui, repo) functions called for wire peer initialization
184 wirepeersetupfuncs = []
184 wirepeersetupfuncs = []
185
185
186
186
187 def _peerorrepo(
187 def _peerorrepo(
188 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
188 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
189 ):
189 ):
190 """return a repository object for the specified path"""
190 """return a repository object for the specified path"""
191 cls = _peerlookup(path)
191 cls = _peerlookup(path)
192 obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
192 obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
193 _setup_repo_or_peer(ui, obj, presetupfuncs)
193 _setup_repo_or_peer(ui, obj, presetupfuncs)
194 return obj
194 return obj
195
195
196
196
197 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
197 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
198 ui = getattr(obj, "ui", ui)
198 ui = getattr(obj, "ui", ui)
199 for f in presetupfuncs or []:
199 for f in presetupfuncs or []:
200 f(ui, obj)
200 f(ui, obj)
201 ui.log(b'extension', b'- executing reposetup hooks\n')
201 ui.log(b'extension', b'- executing reposetup hooks\n')
202 with util.timedcm('all reposetup') as allreposetupstats:
202 with util.timedcm('all reposetup') as allreposetupstats:
203 for name, module in extensions.extensions(ui):
203 for name, module in extensions.extensions(ui):
204 ui.log(b'extension', b' - running reposetup for %s\n', name)
204 ui.log(b'extension', b' - running reposetup for %s\n', name)
205 hook = getattr(module, 'reposetup', None)
205 hook = getattr(module, 'reposetup', None)
206 if hook:
206 if hook:
207 with util.timedcm('reposetup %r', name) as stats:
207 with util.timedcm('reposetup %r', name) as stats:
208 hook(ui, obj)
208 hook(ui, obj)
209 msg = b' > reposetup for %s took %s\n'
209 msg = b' > reposetup for %s took %s\n'
210 ui.log(b'extension', msg, name, stats)
210 ui.log(b'extension', msg, name, stats)
211 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
211 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
212 if not obj.local():
212 if not obj.local():
213 for f in wirepeersetupfuncs:
213 for f in wirepeersetupfuncs:
214 f(ui, obj)
214 f(ui, obj)
215
215
216
216
217 def repository(
217 def repository(
218 ui,
218 ui,
219 path=b'',
219 path=b'',
220 create=False,
220 create=False,
221 presetupfuncs=None,
221 presetupfuncs=None,
222 intents=None,
222 intents=None,
223 createopts=None,
223 createopts=None,
224 ):
224 ):
225 """return a repository object for the specified path"""
225 """return a repository object for the specified path"""
226 peer = _peerorrepo(
226 peer = _peerorrepo(
227 ui,
227 ui,
228 path,
228 path,
229 create,
229 create,
230 presetupfuncs=presetupfuncs,
230 presetupfuncs=presetupfuncs,
231 intents=intents,
231 intents=intents,
232 createopts=createopts,
232 createopts=createopts,
233 )
233 )
234 repo = peer.local()
234 repo = peer.local()
235 if not repo:
235 if not repo:
236 raise error.Abort(
236 raise error.Abort(
237 _(b"repository '%s' is not local") % (path or peer.url())
237 _(b"repository '%s' is not local") % (path or peer.url())
238 )
238 )
239 return repo.filtered(b'visible')
239 return repo.filtered(b'visible')
240
240
241
241
242 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
242 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
243 '''return a repository peer for the specified path'''
243 '''return a repository peer for the specified path'''
244 rui = remoteui(uiorrepo, opts)
244 rui = remoteui(uiorrepo, opts)
245 return _peerorrepo(
245 return _peerorrepo(
246 rui, path, create, intents=intents, createopts=createopts
246 rui, path, create, intents=intents, createopts=createopts
247 ).peer()
247 ).peer()
248
248
249
249
250 def defaultdest(source):
250 def defaultdest(source):
251 """return default destination of clone if none is given
251 """return default destination of clone if none is given
252
252
253 >>> defaultdest(b'foo')
253 >>> defaultdest(b'foo')
254 'foo'
254 'foo'
255 >>> defaultdest(b'/foo/bar')
255 >>> defaultdest(b'/foo/bar')
256 'bar'
256 'bar'
257 >>> defaultdest(b'/')
257 >>> defaultdest(b'/')
258 ''
258 ''
259 >>> defaultdest(b'')
259 >>> defaultdest(b'')
260 ''
260 ''
261 >>> defaultdest(b'http://example.org/')
261 >>> defaultdest(b'http://example.org/')
262 ''
262 ''
263 >>> defaultdest(b'http://example.org/foo/')
263 >>> defaultdest(b'http://example.org/foo/')
264 'foo'
264 'foo'
265 """
265 """
266 path = urlutil.url(source).path
266 path = urlutil.url(source).path
267 if not path:
267 if not path:
268 return b''
268 return b''
269 return os.path.basename(os.path.normpath(path))
269 return os.path.basename(os.path.normpath(path))
270
270
271
271
272 def sharedreposource(repo):
272 def sharedreposource(repo):
273 """Returns repository object for source repository of a shared repo.
273 """Returns repository object for source repository of a shared repo.
274
274
275 If repo is not a shared repository, returns None.
275 If repo is not a shared repository, returns None.
276 """
276 """
277 if repo.sharedpath == repo.path:
277 if repo.sharedpath == repo.path:
278 return None
278 return None
279
279
280 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
280 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
281 return repo.srcrepo
281 return repo.srcrepo
282
282
283 # the sharedpath always ends in the .hg; we want the path to the repo
283 # the sharedpath always ends in the .hg; we want the path to the repo
284 source = repo.vfs.split(repo.sharedpath)[0]
284 source = repo.vfs.split(repo.sharedpath)[0]
285 srcurl, branches = urlutil.parseurl(source)
285 srcurl, branches = urlutil.parseurl(source)
286 srcrepo = repository(repo.ui, srcurl)
286 srcrepo = repository(repo.ui, srcurl)
287 repo.srcrepo = srcrepo
287 repo.srcrepo = srcrepo
288 return srcrepo
288 return srcrepo
289
289
290
290
291 def share(
291 def share(
292 ui,
292 ui,
293 source,
293 source,
294 dest=None,
294 dest=None,
295 update=True,
295 update=True,
296 bookmarks=True,
296 bookmarks=True,
297 defaultpath=None,
297 defaultpath=None,
298 relative=False,
298 relative=False,
299 ):
299 ):
300 '''create a shared repository'''
300 '''create a shared repository'''
301
301
302 if not islocal(source):
302 not_local_msg = _(b'can only share local repositories')
303 raise error.Abort(_(b'can only share local repositories'))
303 if util.safehasattr(source, 'local'):
304 if source.local() is None:
305 raise error.Abort(not_local_msg)
306 elif not islocal(source):
307 # XXX why are we getting bytes here ?
308 raise error.Abort(not_local_msg)
304
309
305 if not dest:
310 if not dest:
306 dest = defaultdest(source)
311 dest = defaultdest(source)
307 else:
312 else:
308 dest = urlutil.get_clone_path(ui, dest)[1]
313 dest = urlutil.get_clone_path(ui, dest)[1]
309
314
310 if isinstance(source, bytes):
315 if isinstance(source, bytes):
311 origsource, source, branches = urlutil.get_clone_path(ui, source)
316 origsource, source, branches = urlutil.get_clone_path(ui, source)
312 srcrepo = repository(ui, source)
317 srcrepo = repository(ui, source)
313 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
318 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
314 else:
319 else:
315 srcrepo = source.local()
320 srcrepo = source.local()
316 checkout = None
321 checkout = None
317
322
318 shareditems = set()
323 shareditems = set()
319 if bookmarks:
324 if bookmarks:
320 shareditems.add(sharedbookmarks)
325 shareditems.add(sharedbookmarks)
321
326
322 r = repository(
327 r = repository(
323 ui,
328 ui,
324 dest,
329 dest,
325 create=True,
330 create=True,
326 createopts={
331 createopts={
327 b'sharedrepo': srcrepo,
332 b'sharedrepo': srcrepo,
328 b'sharedrelative': relative,
333 b'sharedrelative': relative,
329 b'shareditems': shareditems,
334 b'shareditems': shareditems,
330 },
335 },
331 )
336 )
332
337
333 postshare(srcrepo, r, defaultpath=defaultpath)
338 postshare(srcrepo, r, defaultpath=defaultpath)
334 r = repository(ui, dest)
339 r = repository(ui, dest)
335 _postshareupdate(r, update, checkout=checkout)
340 _postshareupdate(r, update, checkout=checkout)
336 return r
341 return r
337
342
338
343
339 def _prependsourcehgrc(repo):
344 def _prependsourcehgrc(repo):
340 """copies the source repo config and prepend it in current repo .hg/hgrc
345 """copies the source repo config and prepend it in current repo .hg/hgrc
341 on unshare. This is only done if the share was perfomed using share safe
346 on unshare. This is only done if the share was perfomed using share safe
342 method where we share config of source in shares"""
347 method where we share config of source in shares"""
343 srcvfs = vfsmod.vfs(repo.sharedpath)
348 srcvfs = vfsmod.vfs(repo.sharedpath)
344 dstvfs = vfsmod.vfs(repo.path)
349 dstvfs = vfsmod.vfs(repo.path)
345
350
346 if not srcvfs.exists(b'hgrc'):
351 if not srcvfs.exists(b'hgrc'):
347 return
352 return
348
353
349 currentconfig = b''
354 currentconfig = b''
350 if dstvfs.exists(b'hgrc'):
355 if dstvfs.exists(b'hgrc'):
351 currentconfig = dstvfs.read(b'hgrc')
356 currentconfig = dstvfs.read(b'hgrc')
352
357
353 with dstvfs(b'hgrc', b'wb') as fp:
358 with dstvfs(b'hgrc', b'wb') as fp:
354 sourceconfig = srcvfs.read(b'hgrc')
359 sourceconfig = srcvfs.read(b'hgrc')
355 fp.write(b"# Config copied from shared source\n")
360 fp.write(b"# Config copied from shared source\n")
356 fp.write(sourceconfig)
361 fp.write(sourceconfig)
357 fp.write(b'\n')
362 fp.write(b'\n')
358 fp.write(currentconfig)
363 fp.write(currentconfig)
359
364
360
365
361 def unshare(ui, repo):
366 def unshare(ui, repo):
362 """convert a shared repository to a normal one
367 """convert a shared repository to a normal one
363
368
364 Copy the store data to the repo and remove the sharedpath data.
369 Copy the store data to the repo and remove the sharedpath data.
365
370
366 Returns a new repository object representing the unshared repository.
371 Returns a new repository object representing the unshared repository.
367
372
368 The passed repository object is not usable after this function is
373 The passed repository object is not usable after this function is
369 called.
374 called.
370 """
375 """
371
376
372 with repo.lock():
377 with repo.lock():
373 # we use locks here because if we race with commit, we
378 # we use locks here because if we race with commit, we
374 # can end up with extra data in the cloned revlogs that's
379 # can end up with extra data in the cloned revlogs that's
375 # not pointed to by changesets, thus causing verify to
380 # not pointed to by changesets, thus causing verify to
376 # fail
381 # fail
377 destlock = copystore(ui, repo, repo.path)
382 destlock = copystore(ui, repo, repo.path)
378 with destlock or util.nullcontextmanager():
383 with destlock or util.nullcontextmanager():
379 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
384 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
380 # we were sharing .hg/hgrc of the share source with the current
385 # we were sharing .hg/hgrc of the share source with the current
381 # repo. We need to copy that while unsharing otherwise it can
386 # repo. We need to copy that while unsharing otherwise it can
382 # disable hooks and other checks
387 # disable hooks and other checks
383 _prependsourcehgrc(repo)
388 _prependsourcehgrc(repo)
384
389
385 sharefile = repo.vfs.join(b'sharedpath')
390 sharefile = repo.vfs.join(b'sharedpath')
386 util.rename(sharefile, sharefile + b'.old')
391 util.rename(sharefile, sharefile + b'.old')
387
392
388 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
393 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
389 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
394 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
390 scmutil.writereporequirements(repo)
395 scmutil.writereporequirements(repo)
391
396
392 # Removing share changes some fundamental properties of the repo instance.
397 # Removing share changes some fundamental properties of the repo instance.
393 # So we instantiate a new repo object and operate on it rather than
398 # So we instantiate a new repo object and operate on it rather than
394 # try to keep the existing repo usable.
399 # try to keep the existing repo usable.
395 newrepo = repository(repo.baseui, repo.root, create=False)
400 newrepo = repository(repo.baseui, repo.root, create=False)
396
401
397 # TODO: figure out how to access subrepos that exist, but were previously
402 # TODO: figure out how to access subrepos that exist, but were previously
398 # removed from .hgsub
403 # removed from .hgsub
399 c = newrepo[b'.']
404 c = newrepo[b'.']
400 subs = c.substate
405 subs = c.substate
401 for s in sorted(subs):
406 for s in sorted(subs):
402 c.sub(s).unshare()
407 c.sub(s).unshare()
403
408
404 localrepo.poisonrepository(repo)
409 localrepo.poisonrepository(repo)
405
410
406 return newrepo
411 return newrepo
407
412
408
413
409 def postshare(sourcerepo, destrepo, defaultpath=None):
414 def postshare(sourcerepo, destrepo, defaultpath=None):
410 """Called after a new shared repo is created.
415 """Called after a new shared repo is created.
411
416
412 The new repo only has a requirements file and pointer to the source.
417 The new repo only has a requirements file and pointer to the source.
413 This function configures additional shared data.
418 This function configures additional shared data.
414
419
415 Extensions can wrap this function and write additional entries to
420 Extensions can wrap this function and write additional entries to
416 destrepo/.hg/shared to indicate additional pieces of data to be shared.
421 destrepo/.hg/shared to indicate additional pieces of data to be shared.
417 """
422 """
418 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
423 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
419 if default:
424 if default:
420 template = b'[paths]\ndefault = %s\n'
425 template = b'[paths]\ndefault = %s\n'
421 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
426 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
422 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
427 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
423 with destrepo.wlock():
428 with destrepo.wlock():
424 narrowspec.copytoworkingcopy(destrepo)
429 narrowspec.copytoworkingcopy(destrepo)
425
430
426
431
427 def _postshareupdate(repo, update, checkout=None):
432 def _postshareupdate(repo, update, checkout=None):
428 """Maybe perform a working directory update after a shared repo is created.
433 """Maybe perform a working directory update after a shared repo is created.
429
434
430 ``update`` can be a boolean or a revision to update to.
435 ``update`` can be a boolean or a revision to update to.
431 """
436 """
432 if not update:
437 if not update:
433 return
438 return
434
439
435 repo.ui.status(_(b"updating working directory\n"))
440 repo.ui.status(_(b"updating working directory\n"))
436 if update is not True:
441 if update is not True:
437 checkout = update
442 checkout = update
438 for test in (checkout, b'default', b'tip'):
443 for test in (checkout, b'default', b'tip'):
439 if test is None:
444 if test is None:
440 continue
445 continue
441 try:
446 try:
442 uprev = repo.lookup(test)
447 uprev = repo.lookup(test)
443 break
448 break
444 except error.RepoLookupError:
449 except error.RepoLookupError:
445 continue
450 continue
446 _update(repo, uprev)
451 _update(repo, uprev)
447
452
448
453
449 def copystore(ui, srcrepo, destpath):
454 def copystore(ui, srcrepo, destpath):
450 """copy files from store of srcrepo in destpath
455 """copy files from store of srcrepo in destpath
451
456
452 returns destlock
457 returns destlock
453 """
458 """
454 destlock = None
459 destlock = None
455 try:
460 try:
456 hardlink = None
461 hardlink = None
457 topic = _(b'linking') if hardlink else _(b'copying')
462 topic = _(b'linking') if hardlink else _(b'copying')
458 with ui.makeprogress(topic, unit=_(b'files')) as progress:
463 with ui.makeprogress(topic, unit=_(b'files')) as progress:
459 num = 0
464 num = 0
460 srcpublishing = srcrepo.publishing()
465 srcpublishing = srcrepo.publishing()
461 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
466 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
462 dstvfs = vfsmod.vfs(destpath)
467 dstvfs = vfsmod.vfs(destpath)
463 for f in srcrepo.store.copylist():
468 for f in srcrepo.store.copylist():
464 if srcpublishing and f.endswith(b'phaseroots'):
469 if srcpublishing and f.endswith(b'phaseroots'):
465 continue
470 continue
466 dstbase = os.path.dirname(f)
471 dstbase = os.path.dirname(f)
467 if dstbase and not dstvfs.exists(dstbase):
472 if dstbase and not dstvfs.exists(dstbase):
468 dstvfs.mkdir(dstbase)
473 dstvfs.mkdir(dstbase)
469 if srcvfs.exists(f):
474 if srcvfs.exists(f):
470 if f.endswith(b'data'):
475 if f.endswith(b'data'):
471 # 'dstbase' may be empty (e.g. revlog format 0)
476 # 'dstbase' may be empty (e.g. revlog format 0)
472 lockfile = os.path.join(dstbase, b"lock")
477 lockfile = os.path.join(dstbase, b"lock")
473 # lock to avoid premature writing to the target
478 # lock to avoid premature writing to the target
474 destlock = lock.lock(dstvfs, lockfile)
479 destlock = lock.lock(dstvfs, lockfile)
475 hardlink, n = util.copyfiles(
480 hardlink, n = util.copyfiles(
476 srcvfs.join(f), dstvfs.join(f), hardlink, progress
481 srcvfs.join(f), dstvfs.join(f), hardlink, progress
477 )
482 )
478 num += n
483 num += n
479 if hardlink:
484 if hardlink:
480 ui.debug(b"linked %d files\n" % num)
485 ui.debug(b"linked %d files\n" % num)
481 else:
486 else:
482 ui.debug(b"copied %d files\n" % num)
487 ui.debug(b"copied %d files\n" % num)
483 return destlock
488 return destlock
484 except: # re-raises
489 except: # re-raises
485 release(destlock)
490 release(destlock)
486 raise
491 raise
487
492
488
493
489 def clonewithshare(
494 def clonewithshare(
490 ui,
495 ui,
491 peeropts,
496 peeropts,
492 sharepath,
497 sharepath,
493 source,
498 source,
494 srcpeer,
499 srcpeer,
495 dest,
500 dest,
496 pull=False,
501 pull=False,
497 rev=None,
502 rev=None,
498 update=True,
503 update=True,
499 stream=False,
504 stream=False,
500 ):
505 ):
501 """Perform a clone using a shared repo.
506 """Perform a clone using a shared repo.
502
507
503 The store for the repository will be located at <sharepath>/.hg. The
508 The store for the repository will be located at <sharepath>/.hg. The
504 specified revisions will be cloned or pulled from "source". A shared repo
509 specified revisions will be cloned or pulled from "source". A shared repo
505 will be created at "dest" and a working copy will be created if "update" is
510 will be created at "dest" and a working copy will be created if "update" is
506 True.
511 True.
507 """
512 """
508 revs = None
513 revs = None
509 if rev:
514 if rev:
510 if not srcpeer.capable(b'lookup'):
515 if not srcpeer.capable(b'lookup'):
511 raise error.Abort(
516 raise error.Abort(
512 _(
517 _(
513 b"src repository does not support "
518 b"src repository does not support "
514 b"revision lookup and so doesn't "
519 b"revision lookup and so doesn't "
515 b"support clone by revision"
520 b"support clone by revision"
516 )
521 )
517 )
522 )
518
523
519 # TODO this is batchable.
524 # TODO this is batchable.
520 remoterevs = []
525 remoterevs = []
521 for r in rev:
526 for r in rev:
522 with srcpeer.commandexecutor() as e:
527 with srcpeer.commandexecutor() as e:
523 remoterevs.append(
528 remoterevs.append(
524 e.callcommand(
529 e.callcommand(
525 b'lookup',
530 b'lookup',
526 {
531 {
527 b'key': r,
532 b'key': r,
528 },
533 },
529 ).result()
534 ).result()
530 )
535 )
531 revs = remoterevs
536 revs = remoterevs
532
537
533 # Obtain a lock before checking for or cloning the pooled repo otherwise
538 # Obtain a lock before checking for or cloning the pooled repo otherwise
534 # 2 clients may race creating or populating it.
539 # 2 clients may race creating or populating it.
535 pooldir = os.path.dirname(sharepath)
540 pooldir = os.path.dirname(sharepath)
536 # lock class requires the directory to exist.
541 # lock class requires the directory to exist.
537 try:
542 try:
538 util.makedir(pooldir, False)
543 util.makedir(pooldir, False)
539 except FileExistsError:
544 except FileExistsError:
540 pass
545 pass
541
546
542 poolvfs = vfsmod.vfs(pooldir)
547 poolvfs = vfsmod.vfs(pooldir)
543 basename = os.path.basename(sharepath)
548 basename = os.path.basename(sharepath)
544
549
545 with lock.lock(poolvfs, b'%s.lock' % basename):
550 with lock.lock(poolvfs, b'%s.lock' % basename):
546 if os.path.exists(sharepath):
551 if os.path.exists(sharepath):
547 ui.status(
552 ui.status(
548 _(b'(sharing from existing pooled repository %s)\n') % basename
553 _(b'(sharing from existing pooled repository %s)\n') % basename
549 )
554 )
550 else:
555 else:
551 ui.status(
556 ui.status(
552 _(b'(sharing from new pooled repository %s)\n') % basename
557 _(b'(sharing from new pooled repository %s)\n') % basename
553 )
558 )
554 # Always use pull mode because hardlinks in share mode don't work
559 # Always use pull mode because hardlinks in share mode don't work
555 # well. Never update because working copies aren't necessary in
560 # well. Never update because working copies aren't necessary in
556 # share mode.
561 # share mode.
557 clone(
562 clone(
558 ui,
563 ui,
559 peeropts,
564 peeropts,
560 source,
565 source,
561 dest=sharepath,
566 dest=sharepath,
562 pull=True,
567 pull=True,
563 revs=rev,
568 revs=rev,
564 update=False,
569 update=False,
565 stream=stream,
570 stream=stream,
566 )
571 )
567
572
568 # Resolve the value to put in [paths] section for the source.
573 # Resolve the value to put in [paths] section for the source.
569 if islocal(source):
574 if islocal(source):
570 defaultpath = util.abspath(urlutil.urllocalpath(source))
575 defaultpath = util.abspath(urlutil.urllocalpath(source))
571 else:
576 else:
572 defaultpath = source
577 defaultpath = source
573
578
574 sharerepo = repository(ui, path=sharepath)
579 sharerepo = repository(ui, path=sharepath)
575 destrepo = share(
580 destrepo = share(
576 ui,
581 ui,
577 sharerepo,
582 sharerepo,
578 dest=dest,
583 dest=dest,
579 update=False,
584 update=False,
580 bookmarks=False,
585 bookmarks=False,
581 defaultpath=defaultpath,
586 defaultpath=defaultpath,
582 )
587 )
583
588
584 # We need to perform a pull against the dest repo to fetch bookmarks
589 # We need to perform a pull against the dest repo to fetch bookmarks
585 # and other non-store data that isn't shared by default. In the case of
590 # and other non-store data that isn't shared by default. In the case of
586 # non-existing shared repo, this means we pull from the remote twice. This
591 # non-existing shared repo, this means we pull from the remote twice. This
587 # is a bit weird. But at the time it was implemented, there wasn't an easy
592 # is a bit weird. But at the time it was implemented, there wasn't an easy
588 # way to pull just non-changegroup data.
593 # way to pull just non-changegroup data.
589 exchange.pull(destrepo, srcpeer, heads=revs)
594 exchange.pull(destrepo, srcpeer, heads=revs)
590
595
591 _postshareupdate(destrepo, update)
596 _postshareupdate(destrepo, update)
592
597
593 return srcpeer, peer(ui, peeropts, dest)
598 return srcpeer, peer(ui, peeropts, dest)
594
599
595
600
596 # Recomputing caches is often slow on big repos, so copy them.
601 # Recomputing caches is often slow on big repos, so copy them.
597 def _copycache(srcrepo, dstcachedir, fname):
602 def _copycache(srcrepo, dstcachedir, fname):
598 """copy a cache from srcrepo to destcachedir (if it exists)"""
603 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 srcfname = srcrepo.cachevfs.join(fname)
604 srcfname = srcrepo.cachevfs.join(fname)
600 dstfname = os.path.join(dstcachedir, fname)
605 dstfname = os.path.join(dstcachedir, fname)
601 if os.path.exists(srcfname):
606 if os.path.exists(srcfname):
602 if not os.path.exists(dstcachedir):
607 if not os.path.exists(dstcachedir):
603 os.mkdir(dstcachedir)
608 os.mkdir(dstcachedir)
604 util.copyfile(srcfname, dstfname)
609 util.copyfile(srcfname, dstfname)
605
610
606
611
607 def clone(
612 def clone(
608 ui,
613 ui,
609 peeropts,
614 peeropts,
610 source,
615 source,
611 dest=None,
616 dest=None,
612 pull=False,
617 pull=False,
613 revs=None,
618 revs=None,
614 update=True,
619 update=True,
615 stream=False,
620 stream=False,
616 branch=None,
621 branch=None,
617 shareopts=None,
622 shareopts=None,
618 storeincludepats=None,
623 storeincludepats=None,
619 storeexcludepats=None,
624 storeexcludepats=None,
620 depth=None,
625 depth=None,
621 ):
626 ):
622 """Make a copy of an existing repository.
627 """Make a copy of an existing repository.
623
628
624 Create a copy of an existing repository in a new directory. The
629 Create a copy of an existing repository in a new directory. The
625 source and destination are URLs, as passed to the repository
630 source and destination are URLs, as passed to the repository
626 function. Returns a pair of repository peers, the source and
631 function. Returns a pair of repository peers, the source and
627 newly created destination.
632 newly created destination.
628
633
629 The location of the source is added to the new repository's
634 The location of the source is added to the new repository's
630 .hg/hgrc file, as the default to be used for future pulls and
635 .hg/hgrc file, as the default to be used for future pulls and
631 pushes.
636 pushes.
632
637
633 If an exception is raised, the partly cloned/updated destination
638 If an exception is raised, the partly cloned/updated destination
634 repository will be deleted.
639 repository will be deleted.
635
640
636 Arguments:
641 Arguments:
637
642
638 source: repository object or URL
643 source: repository object or URL
639
644
640 dest: URL of destination repository to create (defaults to base
645 dest: URL of destination repository to create (defaults to base
641 name of source repository)
646 name of source repository)
642
647
643 pull: always pull from source repository, even in local case or if the
648 pull: always pull from source repository, even in local case or if the
644 server prefers streaming
649 server prefers streaming
645
650
646 stream: stream raw data uncompressed from repository (fast over
651 stream: stream raw data uncompressed from repository (fast over
647 LAN, slow over WAN)
652 LAN, slow over WAN)
648
653
649 revs: revision to clone up to (implies pull=True)
654 revs: revision to clone up to (implies pull=True)
650
655
651 update: update working directory after clone completes, if
656 update: update working directory after clone completes, if
652 destination is local repository (True means update to default rev,
657 destination is local repository (True means update to default rev,
653 anything else is treated as a revision)
658 anything else is treated as a revision)
654
659
655 branch: branches to clone
660 branch: branches to clone
656
661
657 shareopts: dict of options to control auto sharing behavior. The "pool" key
662 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 activates auto sharing mode and defines the directory for stores. The
663 activates auto sharing mode and defines the directory for stores. The
659 "mode" key determines how to construct the directory name of the shared
664 "mode" key determines how to construct the directory name of the shared
660 repository. "identity" means the name is derived from the node of the first
665 repository. "identity" means the name is derived from the node of the first
661 changeset in the repository. "remote" means the name is derived from the
666 changeset in the repository. "remote" means the name is derived from the
662 remote's path/URL. Defaults to "identity."
667 remote's path/URL. Defaults to "identity."
663
668
664 storeincludepats and storeexcludepats: sets of file patterns to include and
669 storeincludepats and storeexcludepats: sets of file patterns to include and
665 exclude in the repository copy, respectively. If not defined, all files
670 exclude in the repository copy, respectively. If not defined, all files
666 will be included (a "full" clone). Otherwise a "narrow" clone containing
671 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 only the requested files will be performed. If ``storeincludepats`` is not
672 only the requested files will be performed. If ``storeincludepats`` is not
668 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
673 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 ``path:.``. If both are empty sets, no files will be cloned.
674 ``path:.``. If both are empty sets, no files will be cloned.
670 """
675 """
671
676
672 if isinstance(source, bytes):
677 if isinstance(source, bytes):
673 src = urlutil.get_clone_path(ui, source, branch)
678 src = urlutil.get_clone_path(ui, source, branch)
674 origsource, source, branches = src
679 origsource, source, branches = src
675 srcpeer = peer(ui, peeropts, source)
680 srcpeer = peer(ui, peeropts, source)
676 else:
681 else:
677 srcpeer = source.peer() # in case we were called with a localrepo
682 srcpeer = source.peer() # in case we were called with a localrepo
678 branches = (None, branch or [])
683 branches = (None, branch or [])
679 origsource = source = srcpeer.url()
684 origsource = source = srcpeer.url()
680 srclock = destlock = destwlock = cleandir = None
685 srclock = destlock = destwlock = cleandir = None
681 destpeer = None
686 destpeer = None
682 try:
687 try:
683 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
688 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684
689
685 if dest is None:
690 if dest is None:
686 dest = defaultdest(source)
691 dest = defaultdest(source)
687 if dest:
692 if dest:
688 ui.status(_(b"destination directory: %s\n") % dest)
693 ui.status(_(b"destination directory: %s\n") % dest)
689 else:
694 else:
690 dest = urlutil.get_clone_path(ui, dest)[0]
695 dest = urlutil.get_clone_path(ui, dest)[0]
691
696
692 dest = urlutil.urllocalpath(dest)
697 dest = urlutil.urllocalpath(dest)
693 source = urlutil.urllocalpath(source)
698 source = urlutil.urllocalpath(source)
694
699
695 if not dest:
700 if not dest:
696 raise error.InputError(_(b"empty destination path is not valid"))
701 raise error.InputError(_(b"empty destination path is not valid"))
697
702
698 destvfs = vfsmod.vfs(dest, expandpath=True)
703 destvfs = vfsmod.vfs(dest, expandpath=True)
699 if destvfs.lexists():
704 if destvfs.lexists():
700 if not destvfs.isdir():
705 if not destvfs.isdir():
701 raise error.InputError(
706 raise error.InputError(
702 _(b"destination '%s' already exists") % dest
707 _(b"destination '%s' already exists") % dest
703 )
708 )
704 elif destvfs.listdir():
709 elif destvfs.listdir():
705 raise error.InputError(
710 raise error.InputError(
706 _(b"destination '%s' is not empty") % dest
711 _(b"destination '%s' is not empty") % dest
707 )
712 )
708
713
709 createopts = {}
714 createopts = {}
710 narrow = False
715 narrow = False
711
716
712 if storeincludepats is not None:
717 if storeincludepats is not None:
713 narrowspec.validatepatterns(storeincludepats)
718 narrowspec.validatepatterns(storeincludepats)
714 narrow = True
719 narrow = True
715
720
716 if storeexcludepats is not None:
721 if storeexcludepats is not None:
717 narrowspec.validatepatterns(storeexcludepats)
722 narrowspec.validatepatterns(storeexcludepats)
718 narrow = True
723 narrow = True
719
724
720 if narrow:
725 if narrow:
721 # Include everything by default if only exclusion patterns defined.
726 # Include everything by default if only exclusion patterns defined.
722 if storeexcludepats and not storeincludepats:
727 if storeexcludepats and not storeincludepats:
723 storeincludepats = {b'path:.'}
728 storeincludepats = {b'path:.'}
724
729
725 createopts[b'narrowfiles'] = True
730 createopts[b'narrowfiles'] = True
726
731
727 if depth:
732 if depth:
728 createopts[b'shallowfilestore'] = True
733 createopts[b'shallowfilestore'] = True
729
734
730 if srcpeer.capable(b'lfs-serve'):
735 if srcpeer.capable(b'lfs-serve'):
731 # Repository creation honors the config if it disabled the extension, so
736 # Repository creation honors the config if it disabled the extension, so
732 # we can't just announce that lfs will be enabled. This check avoids
737 # we can't just announce that lfs will be enabled. This check avoids
733 # saying that lfs will be enabled, and then saying it's an unknown
738 # saying that lfs will be enabled, and then saying it's an unknown
734 # feature. The lfs creation option is set in either case so that a
739 # feature. The lfs creation option is set in either case so that a
735 # requirement is added. If the extension is explicitly disabled but the
740 # requirement is added. If the extension is explicitly disabled but the
736 # requirement is set, the clone aborts early, before transferring any
741 # requirement is set, the clone aborts early, before transferring any
737 # data.
742 # data.
738 createopts[b'lfs'] = True
743 createopts[b'lfs'] = True
739
744
740 if extensions.disabled_help(b'lfs'):
745 if extensions.disabled_help(b'lfs'):
741 ui.status(
746 ui.status(
742 _(
747 _(
743 b'(remote is using large file support (lfs), but it is '
748 b'(remote is using large file support (lfs), but it is '
744 b'explicitly disabled in the local configuration)\n'
749 b'explicitly disabled in the local configuration)\n'
745 )
750 )
746 )
751 )
747 else:
752 else:
748 ui.status(
753 ui.status(
749 _(
754 _(
750 b'(remote is using large file support (lfs); lfs will '
755 b'(remote is using large file support (lfs); lfs will '
751 b'be enabled for this repository)\n'
756 b'be enabled for this repository)\n'
752 )
757 )
753 )
758 )
754
759
755 shareopts = shareopts or {}
760 shareopts = shareopts or {}
756 sharepool = shareopts.get(b'pool')
761 sharepool = shareopts.get(b'pool')
757 sharenamemode = shareopts.get(b'mode')
762 sharenamemode = shareopts.get(b'mode')
758 if sharepool and islocal(dest):
763 if sharepool and islocal(dest):
759 sharepath = None
764 sharepath = None
760 if sharenamemode == b'identity':
765 if sharenamemode == b'identity':
761 # Resolve the name from the initial changeset in the remote
766 # Resolve the name from the initial changeset in the remote
762 # repository. This returns nullid when the remote is empty. It
767 # repository. This returns nullid when the remote is empty. It
763 # raises RepoLookupError if revision 0 is filtered or otherwise
768 # raises RepoLookupError if revision 0 is filtered or otherwise
764 # not available. If we fail to resolve, sharing is not enabled.
769 # not available. If we fail to resolve, sharing is not enabled.
765 try:
770 try:
766 with srcpeer.commandexecutor() as e:
771 with srcpeer.commandexecutor() as e:
767 rootnode = e.callcommand(
772 rootnode = e.callcommand(
768 b'lookup',
773 b'lookup',
769 {
774 {
770 b'key': b'0',
775 b'key': b'0',
771 },
776 },
772 ).result()
777 ).result()
773
778
774 if rootnode != sha1nodeconstants.nullid:
779 if rootnode != sha1nodeconstants.nullid:
775 sharepath = os.path.join(sharepool, hex(rootnode))
780 sharepath = os.path.join(sharepool, hex(rootnode))
776 else:
781 else:
777 ui.status(
782 ui.status(
778 _(
783 _(
779 b'(not using pooled storage: '
784 b'(not using pooled storage: '
780 b'remote appears to be empty)\n'
785 b'remote appears to be empty)\n'
781 )
786 )
782 )
787 )
783 except error.RepoLookupError:
788 except error.RepoLookupError:
784 ui.status(
789 ui.status(
785 _(
790 _(
786 b'(not using pooled storage: '
791 b'(not using pooled storage: '
787 b'unable to resolve identity of remote)\n'
792 b'unable to resolve identity of remote)\n'
788 )
793 )
789 )
794 )
790 elif sharenamemode == b'remote':
795 elif sharenamemode == b'remote':
791 sharepath = os.path.join(
796 sharepath = os.path.join(
792 sharepool, hex(hashutil.sha1(source).digest())
797 sharepool, hex(hashutil.sha1(source).digest())
793 )
798 )
794 else:
799 else:
795 raise error.Abort(
800 raise error.Abort(
796 _(b'unknown share naming mode: %s') % sharenamemode
801 _(b'unknown share naming mode: %s') % sharenamemode
797 )
802 )
798
803
799 # TODO this is a somewhat arbitrary restriction.
804 # TODO this is a somewhat arbitrary restriction.
800 if narrow:
805 if narrow:
801 ui.status(
806 ui.status(
802 _(b'(pooled storage not supported for narrow clones)\n')
807 _(b'(pooled storage not supported for narrow clones)\n')
803 )
808 )
804 sharepath = None
809 sharepath = None
805
810
806 if sharepath:
811 if sharepath:
807 return clonewithshare(
812 return clonewithshare(
808 ui,
813 ui,
809 peeropts,
814 peeropts,
810 sharepath,
815 sharepath,
811 source,
816 source,
812 srcpeer,
817 srcpeer,
813 dest,
818 dest,
814 pull=pull,
819 pull=pull,
815 rev=revs,
820 rev=revs,
816 update=update,
821 update=update,
817 stream=stream,
822 stream=stream,
818 )
823 )
819
824
820 srcrepo = srcpeer.local()
825 srcrepo = srcpeer.local()
821
826
822 abspath = origsource
827 abspath = origsource
823 if islocal(origsource):
828 if islocal(origsource):
824 abspath = util.abspath(urlutil.urllocalpath(origsource))
829 abspath = util.abspath(urlutil.urllocalpath(origsource))
825
830
826 if islocal(dest):
831 if islocal(dest):
827 if os.path.exists(dest):
832 if os.path.exists(dest):
828 # only clean up directories we create ourselves
833 # only clean up directories we create ourselves
829 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
834 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
830 cleandir = hgdir
835 cleandir = hgdir
831 else:
836 else:
832 cleandir = dest
837 cleandir = dest
833
838
834 copy = False
839 copy = False
835 if (
840 if (
836 srcrepo
841 srcrepo
837 and srcrepo.cancopy()
842 and srcrepo.cancopy()
838 and islocal(dest)
843 and islocal(dest)
839 and not phases.hassecret(srcrepo)
844 and not phases.hassecret(srcrepo)
840 ):
845 ):
841 copy = not pull and not revs
846 copy = not pull and not revs
842
847
843 # TODO this is a somewhat arbitrary restriction.
848 # TODO this is a somewhat arbitrary restriction.
844 if narrow:
849 if narrow:
845 copy = False
850 copy = False
846
851
847 if copy:
852 if copy:
848 try:
853 try:
849 # we use a lock here because if we race with commit, we
854 # we use a lock here because if we race with commit, we
850 # can end up with extra data in the cloned revlogs that's
855 # can end up with extra data in the cloned revlogs that's
851 # not pointed to by changesets, thus causing verify to
856 # not pointed to by changesets, thus causing verify to
852 # fail
857 # fail
853 srclock = srcrepo.lock(wait=False)
858 srclock = srcrepo.lock(wait=False)
854 except error.LockError:
859 except error.LockError:
855 copy = False
860 copy = False
856
861
857 if copy:
862 if copy:
858 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
863 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
859
864
860 destrootpath = urlutil.urllocalpath(dest)
865 destrootpath = urlutil.urllocalpath(dest)
861 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
866 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
862 localrepo.createrepository(
867 localrepo.createrepository(
863 ui,
868 ui,
864 destrootpath,
869 destrootpath,
865 requirements=dest_reqs,
870 requirements=dest_reqs,
866 )
871 )
867 destrepo = localrepo.makelocalrepository(ui, destrootpath)
872 destrepo = localrepo.makelocalrepository(ui, destrootpath)
868
873
869 destwlock = destrepo.wlock()
874 destwlock = destrepo.wlock()
870 destlock = destrepo.lock()
875 destlock = destrepo.lock()
871 from . import streamclone # avoid cycle
876 from . import streamclone # avoid cycle
872
877
873 streamclone.local_copy(srcrepo, destrepo)
878 streamclone.local_copy(srcrepo, destrepo)
874
879
875 # we need to re-init the repo after manually copying the data
880 # we need to re-init the repo after manually copying the data
876 # into it
881 # into it
877 destpeer = peer(srcrepo, peeropts, dest)
882 destpeer = peer(srcrepo, peeropts, dest)
878
883
879 # make the peer aware that is it already locked
884 # make the peer aware that is it already locked
880 #
885 #
881 # important:
886 # important:
882 #
887 #
883 # We still need to release that lock at the end of the function
888 # We still need to release that lock at the end of the function
884 destpeer.local()._lockref = weakref.ref(destlock)
889 destpeer.local()._lockref = weakref.ref(destlock)
885 destpeer.local()._wlockref = weakref.ref(destwlock)
890 destpeer.local()._wlockref = weakref.ref(destwlock)
886 # dirstate also needs to be copied because `_wlockref` has a reference
891 # dirstate also needs to be copied because `_wlockref` has a reference
887 # to it: this dirstate is saved to disk when the wlock is released
892 # to it: this dirstate is saved to disk when the wlock is released
888 destpeer.local().dirstate = destrepo.dirstate
893 destpeer.local().dirstate = destrepo.dirstate
889
894
890 srcrepo.hook(
895 srcrepo.hook(
891 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
896 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
892 )
897 )
893 else:
898 else:
894 try:
899 try:
895 # only pass ui when no srcrepo
900 # only pass ui when no srcrepo
896 destpeer = peer(
901 destpeer = peer(
897 srcrepo or ui,
902 srcrepo or ui,
898 peeropts,
903 peeropts,
899 dest,
904 dest,
900 create=True,
905 create=True,
901 createopts=createopts,
906 createopts=createopts,
902 )
907 )
903 except FileExistsError:
908 except FileExistsError:
904 cleandir = None
909 cleandir = None
905 raise error.Abort(_(b"destination '%s' already exists") % dest)
910 raise error.Abort(_(b"destination '%s' already exists") % dest)
906
911
907 if revs:
912 if revs:
908 if not srcpeer.capable(b'lookup'):
913 if not srcpeer.capable(b'lookup'):
909 raise error.Abort(
914 raise error.Abort(
910 _(
915 _(
911 b"src repository does not support "
916 b"src repository does not support "
912 b"revision lookup and so doesn't "
917 b"revision lookup and so doesn't "
913 b"support clone by revision"
918 b"support clone by revision"
914 )
919 )
915 )
920 )
916
921
917 # TODO this is batchable.
922 # TODO this is batchable.
918 remoterevs = []
923 remoterevs = []
919 for rev in revs:
924 for rev in revs:
920 with srcpeer.commandexecutor() as e:
925 with srcpeer.commandexecutor() as e:
921 remoterevs.append(
926 remoterevs.append(
922 e.callcommand(
927 e.callcommand(
923 b'lookup',
928 b'lookup',
924 {
929 {
925 b'key': rev,
930 b'key': rev,
926 },
931 },
927 ).result()
932 ).result()
928 )
933 )
929 revs = remoterevs
934 revs = remoterevs
930
935
931 checkout = revs[0]
936 checkout = revs[0]
932 else:
937 else:
933 revs = None
938 revs = None
934 local = destpeer.local()
939 local = destpeer.local()
935 if local:
940 if local:
936 if narrow:
941 if narrow:
937 with local.wlock(), local.lock():
942 with local.wlock(), local.lock():
938 local.setnarrowpats(storeincludepats, storeexcludepats)
943 local.setnarrowpats(storeincludepats, storeexcludepats)
939 narrowspec.copytoworkingcopy(local)
944 narrowspec.copytoworkingcopy(local)
940
945
941 u = urlutil.url(abspath)
946 u = urlutil.url(abspath)
942 defaulturl = bytes(u)
947 defaulturl = bytes(u)
943 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
948 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
944 if not stream:
949 if not stream:
945 if pull:
950 if pull:
946 stream = False
951 stream = False
947 else:
952 else:
948 stream = None
953 stream = None
949 # internal config: ui.quietbookmarkmove
954 # internal config: ui.quietbookmarkmove
950 overrides = {(b'ui', b'quietbookmarkmove'): True}
955 overrides = {(b'ui', b'quietbookmarkmove'): True}
951 with local.ui.configoverride(overrides, b'clone'):
956 with local.ui.configoverride(overrides, b'clone'):
952 exchange.pull(
957 exchange.pull(
953 local,
958 local,
954 srcpeer,
959 srcpeer,
955 heads=revs,
960 heads=revs,
956 streamclonerequested=stream,
961 streamclonerequested=stream,
957 includepats=storeincludepats,
962 includepats=storeincludepats,
958 excludepats=storeexcludepats,
963 excludepats=storeexcludepats,
959 depth=depth,
964 depth=depth,
960 )
965 )
961 elif srcrepo:
966 elif srcrepo:
962 # TODO lift restriction once exchange.push() accepts narrow
967 # TODO lift restriction once exchange.push() accepts narrow
963 # push.
968 # push.
964 if narrow:
969 if narrow:
965 raise error.Abort(
970 raise error.Abort(
966 _(
971 _(
967 b'narrow clone not available for '
972 b'narrow clone not available for '
968 b'remote destinations'
973 b'remote destinations'
969 )
974 )
970 )
975 )
971
976
972 exchange.push(
977 exchange.push(
973 srcrepo,
978 srcrepo,
974 destpeer,
979 destpeer,
975 revs=revs,
980 revs=revs,
976 bookmarks=srcrepo._bookmarks.keys(),
981 bookmarks=srcrepo._bookmarks.keys(),
977 )
982 )
978 else:
983 else:
979 raise error.Abort(
984 raise error.Abort(
980 _(b"clone from remote to remote not supported")
985 _(b"clone from remote to remote not supported")
981 )
986 )
982
987
983 cleandir = None
988 cleandir = None
984
989
985 destrepo = destpeer.local()
990 destrepo = destpeer.local()
986 if destrepo:
991 if destrepo:
987 template = uimod.samplehgrcs[b'cloned']
992 template = uimod.samplehgrcs[b'cloned']
988 u = urlutil.url(abspath)
993 u = urlutil.url(abspath)
989 u.passwd = None
994 u.passwd = None
990 defaulturl = bytes(u)
995 defaulturl = bytes(u)
991 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
996 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
992 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
997 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
993
998
994 if ui.configbool(b'experimental', b'remotenames'):
999 if ui.configbool(b'experimental', b'remotenames'):
995 logexchange.pullremotenames(destrepo, srcpeer)
1000 logexchange.pullremotenames(destrepo, srcpeer)
996
1001
997 if update:
1002 if update:
998 if update is not True:
1003 if update is not True:
999 with srcpeer.commandexecutor() as e:
1004 with srcpeer.commandexecutor() as e:
1000 checkout = e.callcommand(
1005 checkout = e.callcommand(
1001 b'lookup',
1006 b'lookup',
1002 {
1007 {
1003 b'key': update,
1008 b'key': update,
1004 },
1009 },
1005 ).result()
1010 ).result()
1006
1011
1007 uprev = None
1012 uprev = None
1008 status = None
1013 status = None
1009 if checkout is not None:
1014 if checkout is not None:
1010 # Some extensions (at least hg-git and hg-subversion) have
1015 # Some extensions (at least hg-git and hg-subversion) have
1011 # a peer.lookup() implementation that returns a name instead
1016 # a peer.lookup() implementation that returns a name instead
1012 # of a nodeid. We work around it here until we've figured
1017 # of a nodeid. We work around it here until we've figured
1013 # out a better solution.
1018 # out a better solution.
1014 if len(checkout) == 20 and checkout in destrepo:
1019 if len(checkout) == 20 and checkout in destrepo:
1015 uprev = checkout
1020 uprev = checkout
1016 elif scmutil.isrevsymbol(destrepo, checkout):
1021 elif scmutil.isrevsymbol(destrepo, checkout):
1017 uprev = scmutil.revsymbol(destrepo, checkout).node()
1022 uprev = scmutil.revsymbol(destrepo, checkout).node()
1018 else:
1023 else:
1019 if update is not True:
1024 if update is not True:
1020 try:
1025 try:
1021 uprev = destrepo.lookup(update)
1026 uprev = destrepo.lookup(update)
1022 except error.RepoLookupError:
1027 except error.RepoLookupError:
1023 pass
1028 pass
1024 if uprev is None:
1029 if uprev is None:
1025 try:
1030 try:
1026 if destrepo._activebookmark:
1031 if destrepo._activebookmark:
1027 uprev = destrepo.lookup(destrepo._activebookmark)
1032 uprev = destrepo.lookup(destrepo._activebookmark)
1028 update = destrepo._activebookmark
1033 update = destrepo._activebookmark
1029 else:
1034 else:
1030 uprev = destrepo._bookmarks[b'@']
1035 uprev = destrepo._bookmarks[b'@']
1031 update = b'@'
1036 update = b'@'
1032 bn = destrepo[uprev].branch()
1037 bn = destrepo[uprev].branch()
1033 if bn == b'default':
1038 if bn == b'default':
1034 status = _(b"updating to bookmark %s\n" % update)
1039 status = _(b"updating to bookmark %s\n" % update)
1035 else:
1040 else:
1036 status = (
1041 status = (
1037 _(b"updating to bookmark %s on branch %s\n")
1042 _(b"updating to bookmark %s on branch %s\n")
1038 ) % (update, bn)
1043 ) % (update, bn)
1039 except KeyError:
1044 except KeyError:
1040 try:
1045 try:
1041 uprev = destrepo.branchtip(b'default')
1046 uprev = destrepo.branchtip(b'default')
1042 except error.RepoLookupError:
1047 except error.RepoLookupError:
1043 uprev = destrepo.lookup(b'tip')
1048 uprev = destrepo.lookup(b'tip')
1044 if not status:
1049 if not status:
1045 bn = destrepo[uprev].branch()
1050 bn = destrepo[uprev].branch()
1046 status = _(b"updating to branch %s\n") % bn
1051 status = _(b"updating to branch %s\n") % bn
1047 destrepo.ui.status(status)
1052 destrepo.ui.status(status)
1048 _update(destrepo, uprev)
1053 _update(destrepo, uprev)
1049 if update in destrepo._bookmarks:
1054 if update in destrepo._bookmarks:
1050 bookmarks.activate(destrepo, update)
1055 bookmarks.activate(destrepo, update)
1051 if destlock is not None:
1056 if destlock is not None:
1052 release(destlock)
1057 release(destlock)
1053 if destwlock is not None:
1058 if destwlock is not None:
1054 release(destlock)
1059 release(destlock)
1055 # here is a tiny windows were someone could end up writing the
1060 # here is a tiny windows were someone could end up writing the
1056 # repository before the cache are sure to be warm. This is "fine"
1061 # repository before the cache are sure to be warm. This is "fine"
1057 # as the only "bad" outcome would be some slowness. That potential
1062 # as the only "bad" outcome would be some slowness. That potential
1058 # slowness already affect reader.
1063 # slowness already affect reader.
1059 with destrepo.lock():
1064 with destrepo.lock():
1060 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1065 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1061 finally:
1066 finally:
1062 release(srclock, destlock, destwlock)
1067 release(srclock, destlock, destwlock)
1063 if cleandir is not None:
1068 if cleandir is not None:
1064 shutil.rmtree(cleandir, True)
1069 shutil.rmtree(cleandir, True)
1065 if srcpeer is not None:
1070 if srcpeer is not None:
1066 srcpeer.close()
1071 srcpeer.close()
1067 if destpeer and destpeer.local() is None:
1072 if destpeer and destpeer.local() is None:
1068 destpeer.close()
1073 destpeer.close()
1069 return srcpeer, destpeer
1074 return srcpeer, destpeer
1070
1075
1071
1076
1072 def _showstats(repo, stats, quietempty=False):
1077 def _showstats(repo, stats, quietempty=False):
1073 if quietempty and stats.isempty():
1078 if quietempty and stats.isempty():
1074 return
1079 return
1075 repo.ui.status(
1080 repo.ui.status(
1076 _(
1081 _(
1077 b"%d files updated, %d files merged, "
1082 b"%d files updated, %d files merged, "
1078 b"%d files removed, %d files unresolved\n"
1083 b"%d files removed, %d files unresolved\n"
1079 )
1084 )
1080 % (
1085 % (
1081 stats.updatedcount,
1086 stats.updatedcount,
1082 stats.mergedcount,
1087 stats.mergedcount,
1083 stats.removedcount,
1088 stats.removedcount,
1084 stats.unresolvedcount,
1089 stats.unresolvedcount,
1085 )
1090 )
1086 )
1091 )
1087
1092
1088
1093
1089 def updaterepo(repo, node, overwrite, updatecheck=None):
1094 def updaterepo(repo, node, overwrite, updatecheck=None):
1090 """Update the working directory to node.
1095 """Update the working directory to node.
1091
1096
1092 When overwrite is set, changes are clobbered, merged else
1097 When overwrite is set, changes are clobbered, merged else
1093
1098
1094 returns stats (see pydoc mercurial.merge.applyupdates)"""
1099 returns stats (see pydoc mercurial.merge.applyupdates)"""
1095 repo.ui.deprecwarn(
1100 repo.ui.deprecwarn(
1096 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1101 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1097 b'5.7',
1102 b'5.7',
1098 )
1103 )
1099 return mergemod._update(
1104 return mergemod._update(
1100 repo,
1105 repo,
1101 node,
1106 node,
1102 branchmerge=False,
1107 branchmerge=False,
1103 force=overwrite,
1108 force=overwrite,
1104 labels=[b'working copy', b'destination'],
1109 labels=[b'working copy', b'destination'],
1105 updatecheck=updatecheck,
1110 updatecheck=updatecheck,
1106 )
1111 )
1107
1112
1108
1113
1109 def update(repo, node, quietempty=False, updatecheck=None):
1114 def update(repo, node, quietempty=False, updatecheck=None):
1110 """update the working directory to node"""
1115 """update the working directory to node"""
1111 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1116 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1112 _showstats(repo, stats, quietempty)
1117 _showstats(repo, stats, quietempty)
1113 if stats.unresolvedcount:
1118 if stats.unresolvedcount:
1114 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1119 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1115 return stats.unresolvedcount > 0
1120 return stats.unresolvedcount > 0
1116
1121
1117
1122
1118 # naming conflict in clone()
1123 # naming conflict in clone()
1119 _update = update
1124 _update = update
1120
1125
1121
1126
1122 def clean(repo, node, show_stats=True, quietempty=False):
1127 def clean(repo, node, show_stats=True, quietempty=False):
1123 """forcibly switch the working directory to node, clobbering changes"""
1128 """forcibly switch the working directory to node, clobbering changes"""
1124 stats = mergemod.clean_update(repo[node])
1129 stats = mergemod.clean_update(repo[node])
1125 assert stats.unresolvedcount == 0
1130 assert stats.unresolvedcount == 0
1126 if show_stats:
1131 if show_stats:
1127 _showstats(repo, stats, quietempty)
1132 _showstats(repo, stats, quietempty)
1128 return False
1133 return False
1129
1134
1130
1135
1131 # naming conflict in updatetotally()
1136 # naming conflict in updatetotally()
1132 _clean = clean
1137 _clean = clean
1133
1138
1134 _VALID_UPDATECHECKS = {
1139 _VALID_UPDATECHECKS = {
1135 mergemod.UPDATECHECK_ABORT,
1140 mergemod.UPDATECHECK_ABORT,
1136 mergemod.UPDATECHECK_NONE,
1141 mergemod.UPDATECHECK_NONE,
1137 mergemod.UPDATECHECK_LINEAR,
1142 mergemod.UPDATECHECK_LINEAR,
1138 mergemod.UPDATECHECK_NO_CONFLICT,
1143 mergemod.UPDATECHECK_NO_CONFLICT,
1139 }
1144 }
1140
1145
1141
1146
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (non-None) argument with a bad value is a caller bug
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # the bookmark to move and the bookmark name to activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then fall
                # through with no further dirtiness checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination name is an existing bookmark: make it active
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1220
1225
1221
1226
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repository = ctx.repo()
    merge_stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repository, merge_stats)
    unresolved = merge_stats.unresolvedcount
    if not unresolved:
        # clean merge: nudge the user toward committing the result
        if remind:
            repository.ui.status(
                _(b"(branch merge, don't forget to commit)\n")
            )
    else:
        repository.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    return unresolved > 0
1243
1248
1244
1249
def abortmerge(ui, repo):
    """Abort an in-progress merge and update back to the first parent."""
    merge_state = mergestatemod.mergestate.read(repo)
    # an active mergestate means conflicts were recorded; without one the
    # merge stored no state and the working-directory parent is the target
    if not merge_state.active():
        # there were no conficts, mergestate was not stored
        node = repo[b'.'].hex()
    else:
        # there were conflicts
        node = merge_state.localctx.hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    clean_stats = mergemod.clean_update(repo[node])
    # a clean update never leaves conflicts behind
    assert clean_stats.unresolvedcount == 0
    _showstats(repo, clean_stats)
1258
1263
1259
1264
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subrepo URL replaces the source entirely
            source = bytes(subpath)
        else:
            # join the relative subrepo path onto the source; use OS path
            # semantics only for local sources
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # NOTE: getremotechanges may replace 'other' (e.g. with a bundle
        # repo of the incoming changesets) and hands back the matching
        # cleanup callable, which supersedes the plain 'other.close'
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1321
1326
1322
1327
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` that are not in the local repo.

    Returns 0 when incoming changes were found (see _incoming), otherwise
    the minimum of 1 and the subrepo recursion results.
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was requested; the overall
        # exit status is the minimum across all subrepos (0 wins over 1)
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # changeset renderer handed to _incoming(); honors the --limit,
        # --newest-first and --no-merges options
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            # two real parents == merge changeset
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1352
1357
1353
1358
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every push destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)``: the union of missing nodes
    across all destinations, sorted by local revision number, and the list
    of opened peers. The caller is responsible for closing the peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # an absolute subrepo URL replaces the destination entirely
                dest = bytes(subpath)
            else:
                # join the relative subrepo path onto the destination; use
                # OS path semantics only for local destinations
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # only successfully-queried peers are handed back to the caller;
            # a failing peer is closed right here
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1396
1401
1397
1402
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo; return the minimum exit code seen."""
    exitcode = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            subrepo = wctx.sub(sub_path)
            exitcode = min(exitcode, subrepo.outgoing(ui, dests, opts))
    return exitcode
1406
1411
1407
1412
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    # fast path: no limit and no merge filtering, stream everything through
    if limit is None and not skip_merges:
        yield from revs
        return

    emitted = 0
    changelog = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            # two real (non-null) parents == merge changeset
            real_parents = [
                p for p in changelog.parents(node) if p != repo.nullid
            ]
            if len(real_parents) == 2:
                continue
        emitted += 1
        yield node
1429
1434
1430
1435
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (possibly
    reduced by subrepo recursion results).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # zero when outgoing changes were found above
    finally:
        # peers opened by _outgoing() must be closed in every case
        for oth in others:
            oth.close()
1462
1467
1463
1468
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies .hgsubstate links for every revision touching that file.
    Returns the verify module's status code, possibly degraded by subrepo
    verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a truthy verify() result (failure) sticks in 'ret'
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself blew up: corrupt .hgsubstate
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1496
1501
1497
1502
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: command-line flags win over configuration
    for opt_name in b'ssh', b'remotecmd':
        value = opts.get(opt_name) or src.config(b'ui', opt_name)
        if value:
            dst.setconfig(b"ui", opt_name, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # carry over the local security/proxy related sections wholesale
    for section in (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    ):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1526
1531
1527
1532
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1537
1542
1538
1543
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # (mtime, size) fingerprint of the files of interest (see 'foi')
        # plus the newest mtime among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: open a fresh instance and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return the ((mtime, size), ...) fingerprint and newest mtime."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet (e.g. no obsstore); fall back
                # to the containing directory so changes are still noticed
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        # share the already-computed fingerprint instead of re-statting
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now