##// END OF EJS Templates
Changeset: peer-or-repo: stop relying on AttributeError in `islocal`
Author: marmoute
Revision: r50583:0d5b2e01 (default branch)
Diff of mercurial/hg.py (the duplicated lines below are the old/new columns
of the review page's side-by-side rendering):
@@ -1,1617 +1,1618 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """resolve branch names against a peer into concrete revisions

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns a ``(revs, checkout)`` pair where ``revs`` is the
    list of revisions to operate on and ``checkout`` is the revision to
    check out (or None).

    Raises ``error.Abort`` when the peer cannot look up branches and
    ``error.RepoLookupError`` for unknown branch names.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch component at all: pass the revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand a branch name to its heads; returns False when unknown
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: treat it as a raw hash/revision
            revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file is a bundle, which is not a "local repository"
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # dispatch: a path pointing at a regular file is a bundle,
        # anything else is handled by localrepo
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
# map URL schemes to the module (or factory) implementing repositories
# and peers for that scheme
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
155
155
156
156
def _peerlookup(path):
    """return the scheme module handling ``path``

    Unknown schemes fall back to the ``file`` handler.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    return thing
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        # explicit capability check instead of relying on AttributeError,
        # which could mask unrelated attribute errors raised inside islocal()
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
173
174
174
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
182
183
183
184
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
186
187
187
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    cls = _peerlookup(path)
    obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
196
197
197
198
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """run extension setup on a freshly created repo or peer object

    Calls the caller-provided ``presetupfuncs``, then every loaded
    extension's ``reposetup`` hook (timed and logged), and finally the
    registered wire-peer setup functions for non-local objects.
    """
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
216
217
217
218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a remote peer rather than a local
    repository.  The returned repository is filtered to ``visible``.
    """
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
241
242
242
243
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
249
250
250
251
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
271
272
272
273
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # memoized on the repo object by a previous call
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
290
291
291
292
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the freshly written share metadata takes effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
343
344
344
345
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first so local settings keep precedence
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
365
366
366
367
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
413
414
414
415
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
431
432
432
433
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
453
454
454
455
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos do not carry phase information over
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
493
494
494
495
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
600
601
601
602
602 # Recomputing caches is often slow on big repos, so copy them.
603 # Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy the cache file ``fname`` from ``srcrepo`` into ``dstcachedir``.

    No-op when the source cache file does not exist; the destination
    directory is created on demand.
    """
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
611
612
612
613
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: translate the template *before*
                            # interpolating, otherwise the gettext catalog
                            # lookup can never match (and this now matches
                            # the sibling branch below)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix: this used to release `destlock` a second time, leaving
                # the working-copy lock held until the `finally` below
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1076
1077
1077
1078
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge result counters from ``stats`` to the ui.

    Stays silent when ``quietempty`` is true and nothing happened.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1093
1094
1094
1095
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    With ``overwrite`` set, local changes are clobbered; otherwise they are
    merged. Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1113
1114
1114
1115
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved files remain after the update.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# naming conflict in clone()
_update = update
1126
1127
1127
1128
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding changes.

    Always returns False: an overwriting update cannot leave unresolved
    files behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (overwrite) update never produces merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1139
1140
# The set of `updatecheck` values accepted by updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1146
1147
1147
1148
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (non-None) argument must be one of the known constants
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move along with the update)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now on a dirty working directory, then update
                # without further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something other than a bookmark:
            # deactivate any currently active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1226
1227
1227
1228
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    merge_stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, merge_stats)
    unresolved = merge_stats.unresolvedcount
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1249
1250
1250
1251
def abortmerge(ui, repo):
    """Abandon an in-progress merge and reset the working directory."""
    mergestate = mergestatemod.mergestate.read(repo)
    if mergestate.active():
        # there were conflicts; go back to the recorded "local" side
        node = mergestate.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    update_stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave conflicts behind
    assert update_stats.unresolvedcount == 0
    _showstats(repo, update_stats)
1264
1265
1265
1266
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise the
    result of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # restrict comparison to a subrepository path; relative subpaths are
        # joined onto the source URL with the appropriate path flavor
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be replaced below when a bundle repo takes ownership of
    # the peer; the finally clause always runs the current cleanup handle
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1327
1328
1328
1329
def incoming(ui, repo, source, opts, subpath=None):
    """show changesets present in ``source`` but not in the local repo"""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given; the
        # result is 0 as soon as any repository has incoming changes
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                result = min(result, wctx.sub(spath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1358
1359
1359
1360
def _outgoing(ui, repo, dests, opts, subpath=None):
    """find changesets missing from each push destination

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes across all destinations, sorted by local
    revision number, and ``others`` is the list of still-open peers (the
    caller is responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # restrict comparison to a subrepository path; relative
            # subpaths are joined onto the destination URL
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # keep the peer open for the caller (hooks, cleanup)
            others.append(other)
        except:  # re-raises
            # the peer was not handed to the caller; close it here
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1402
1403
1403
1404
def _outgoing_recurse(ui, repo, dests, opts):
    """run outgoing on every subrepository when --subrepos is set

    Returns the minimum of 1 and the subrepositories' exit codes.
    """
    result = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            result = min(result, wctx.sub(spath).outgoing(ui, dests, opts))
    return result
1412
1413
1413
1414
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --limit, --no-merges and
    --newest-first. Note that --newest-first reverses ``revs`` in place.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    # fast path: no filtering requested, stream everything through
    if limit is None and not skip_merges:
        yield from revs
        return

    emitted = 0
    changelog = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in changelog.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1435
1436
1436
1437
def outgoing(ui, repo, dests, opts, subpath=None):
    """show changesets not found in the given destination(s)

    Returns 0 when outgoing changes were found (in this repo or, with
    --subrepos, in a subrepository), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            # --graph display ignores _outgoing_filter ordering options
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing left the peers open for the hooks above; close them now
        for oth in others:
            oth.close()
1468
1469
1469
1470
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies subrepositories referenced from visible revisions of
    .hgsubstate. Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a broken subrepo is reported but does not stop
                        # verification of the remaining subrepos/revisions
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself failed: the .hgsubstate file
                # content in this revision could not be parsed
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1502
1503
1503
1504
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from the base ui to drop
        # repo-specific config, but read the target options from the repo ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume src is a global ui object; keep all global options
        dst = src.copy()

    # copy ssh-specific options, command-line opts taking precedence
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    reporoot = src.config(b'bundle', b'mainreporoot')
    if reporoot:
        dst.setconfig(b'bundle', b'mainreporoot', reporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1532
1533
1533
1534
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of the repo attribute holding the base directory,
# file name relative to it); see cachedlocalrepo._repostate below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1543
1544
1544
1545
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, and
        # the newest mtime seen among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the filter level of the cached instance
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` describing the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per ``foi`` entry;
        ``maxmtime`` is the newest mtime among them (-1 if none could be
        stated).
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing (e.g. not yet created): fall back to the
                # containing directory so the snapshot stays comparable
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the change-detection snapshot so the copy does not
        # immediately consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now