##// END OF EJS Templates
peer-or-repo: make sure objects in "schemes" have an `instance` attribute...
marmoute -
r50580:d9791643 default
parent child Browse files
Show More
@@ -1,1604 +1,1611 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
68 def _local(path):
69 path = util.expandpath(urlutil.urllocalpath(path))
70
71 try:
72 # we use os.stat() directly here instead of os.path.isfile()
73 # because the latter started returning `False` on invalid path
74 # exceptions starting in 3.8 and we care about handling
75 # invalid paths specially here.
76 st = os.stat(path)
77 isfile = stat.S_ISREG(st.st_mode)
78 except ValueError as e:
79 raise error.Abort(
80 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
81 )
82 except OSError:
83 isfile = False
84
85 return isfile and bundlerepo or localrepo
86
87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against *other* and fold them into *revs*.

    ``branches`` is a ``(hashbranch, branches)`` pair as returned by
    ``urlutil.parseurl``.  Returns a ``(revs, checkout)`` pair where
    ``checkout`` is the revision an update should target.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend `revs` with the heads of `branch`; True on success
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it is a raw revision/hash
            revs.append(hashbranch)
    return revs, revs[0]
def _isfile(path):
    """Return True if *path* refers to a regular file.

    Raises ``error.Abort`` for syntactically invalid paths (e.g. embedded
    NUL bytes); returns False when the path does not exist or cannot be
    stat'ed.
    """
    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
    except ValueError as e:
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    else:
        return stat.S_ISREG(st.st_mode)
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo

    Gives the b'file' scheme the same ``islocal``/``instance`` interface
    as the other entries in the ``schemes`` table.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a regular file is a bundle, which is not a "local repo" proper
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
# map of URL scheme -> object exposing an `instance(ui, path, ...)` factory
# (and `islocal` where meaningful)
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
def _peerlookup(path):
    """Return the ``schemes`` entry handling *path* (default: b'file')."""
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    # fall back to the file handler for unknown schemes
    return schemes.get(scheme) or schemes[b'file']
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # handler has no `islocal`: scheme is inherently remote
            return False
    return repo.local()
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    cls = _peerlookup(path)
    obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension reposetup hooks on *obj*."""
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own round of setup callbacks
        for f in wirepeersetupfuncs:
            f(ui, obj)
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    # hide obsolete/secret-filtered changesets from callers by default
    return repo.filtered(b'visible')
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached on a previous call
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # first resolvable candidate wins
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink is tri-state: None until util.copyfiles decides
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # a publishing repo treats everything public; skip phases
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
589 # Recomputing caches is often slow on big repos, so copy them.
596 # Recomputing caches is often slow on big repos, so copy them.
590 def _copycache(srcrepo, dstcachedir, fname):
597 def _copycache(srcrepo, dstcachedir, fname):
591 """copy a cache from srcrepo to destcachedir (if it exists)"""
598 """copy a cache from srcrepo to destcachedir (if it exists)"""
592 srcfname = srcrepo.cachevfs.join(fname)
599 srcfname = srcrepo.cachevfs.join(fname)
593 dstfname = os.path.join(dstcachedir, fname)
600 dstfname = os.path.join(dstcachedir, fname)
594 if os.path.exists(srcfname):
601 if os.path.exists(srcfname):
595 if not os.path.exists(dstcachedir):
602 if not os.path.exists(dstcachedir):
596 os.mkdir(dstcachedir)
603 os.mkdir(dstcachedir)
597 util.copyfile(srcfname, dstfname)
604 util.copyfile(srcfname, dstfname)
598
605
599
606
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that it is already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            # strip any password so it is not persisted in .hg/hgrc
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # NOTE(review): the `%` is applied *inside* _()
                            # here, unlike every other message in this
                            # function — translation lookup sees the already
                            # formatted string. Kept as-is to preserve
                            # behavior; worth confirming upstream.
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # BUGFIX: this used to release `destlock` a second time,
                # leaving the wlock held until the `finally` below and
                # double-releasing the store lock.
                release(destwlock)
            # there is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1063
1070
1064
1071
1065 def _showstats(repo, stats, quietempty=False):
1072 def _showstats(repo, stats, quietempty=False):
1066 if quietempty and stats.isempty():
1073 if quietempty and stats.isempty():
1067 return
1074 return
1068 repo.ui.status(
1075 repo.ui.status(
1069 _(
1076 _(
1070 b"%d files updated, %d files merged, "
1077 b"%d files updated, %d files merged, "
1071 b"%d files removed, %d files unresolved\n"
1078 b"%d files removed, %d files unresolved\n"
1072 )
1079 )
1073 % (
1080 % (
1074 stats.updatedcount,
1081 stats.updatedcount,
1075 stats.mergedcount,
1082 stats.mergedcount,
1076 stats.removedcount,
1083 stats.removedcount,
1077 stats.unresolvedcount,
1084 stats.unresolvedcount,
1078 )
1085 )
1079 )
1086 )
1080
1087
1081
1088
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point (since 5.7); warn, then delegate.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1100
1107
1101
1108
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True means the update left conflicts behind
    return unresolved > 0
1109
1116
1110
1117
# `clone()` takes an `update` argument that shadows the module-level
# update() function above, so keep an underscore alias for internal use.
_update = update
1113
1120
1114
1121
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # callers treat the return value as "has conflicts"; always False here
    return False
1122
1129
1123
1130
# `updatetotally()` takes a `clean` argument that shadows the module-level
# clean() function above, so keep an underscore alias for internal use.
_clean = clean
1126
1133
# The set of values accepted for the `updatecheck` argument of
# updatetotally() (and the `commands.update.check` config option).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1133
1140
1134
1141
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (caller-supplied) value must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    # hold the wlock so the checkout and the bookmark adjustments below
    # happen atomically with respect to other working-copy users
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: pick one (and possibly a bookmark
            # to drag along) from the configured/default update logic
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now on a dirty wdir, then fall through with no
                # further checking (the working copy is known clean)
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1213
1220
1214
1221
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with ``ctx``, resolving changes.

    Returns True when any unresolved conflicts remain, False otherwise.
    When ``remind`` is set and the merge is clean, a reminder to commit
    is printed.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1236
1243
1237
1244
def abortmerge(ui, repo):
    """Abandon an in-progress merge, resetting the working directory."""
    ms = mergestatemod.mergestate.read(repo)
    # when conflicts occurred a merge state was stored and records the
    # local side; otherwise fall back to the working directory's parent
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clobbering update can never leave unresolved conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1251
1258
1252
1259
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the return value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepo path onto the source URL (absolute subpaths
        # replace it entirely)
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            # use OS path rules only for local paths; URLs stay POSIX
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # getremotechanges() may replace both the peer and the cleanup
    # callable below, so track the current cleanup in `cleanupfn`
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1314
1321
1315
1322
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``."""

    def subreporecurse():
        # propagate the best (lowest) exit code from subrepo recursion
        status = 1
        if not opts.get(b'subrepos'):
            return status
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            sub = wctx.sub(sub_path)
            status = min(status, sub.incoming(ui, source, opts))
        return status

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            # a changeset with two real parents is a merge
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1345
1352
1346
1353
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from every push destination in ``dests``.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes across all destinations, sorted by local
    revision number, and ``others`` is the list of still-open peers.
    The caller is responsible for closing every peer in ``others``.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # graft the subrepo path onto the destination (absolute
            # subpaths replace it entirely)
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                # OS path rules only for local destinations; URLs are POSIX
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # keep the peer open: the caller runs outgoing hooks against it
            others.append(other)
        except:  # re-raises
            # on failure, close the peer we own before propagating
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1389
1396
1390
1397
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepo; return the lowest exit code seen.

    Returns 1 when subrepo recursion is disabled or nothing is outgoing.
    """
    status = 1
    if not opts.get(b'subrepos'):
        return status
    wctx = repo[None]
    for name in sorted(wctx.substate):
        status = min(status, wctx.sub(name).outgoing(ui, dests, opts))
    return status
1399
1406
1400
1407
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Honors ``newest_first`` (reverses ``revs`` in place), ``no_merges``
    and the display limit, yielding the surviving nodes.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream everything straight through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        # two real parents means a merge changeset
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1422
1429
1423
1430
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changesets were found (possibly lowered
    further by subrepo recursion), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run against each destination peer, even with no changes
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() left its peers open for the hooks above; close them
        for oth in others:
            oth.close()
1455
1462
1456
1463
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies referenced subrepositories.  Returns the combined
    (non-zero on problems) result of the repository and subrepo checks.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any earlier failure sticky in `ret`
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: report and move on
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1489
1496
1490
1497
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # ``src`` looks like a repository: start from its base ui so
        # repo-specific configuration is dropped, then read the options
        # to forward from the repository's own ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume ``src`` is a plain (global) ui object
        dst = src.copy()

    # ssh-specific options; explicit command-line opts win over config
    for name in (b'ssh', b'remotecmd'):
        value = opts.get(name) or src.config(b'ui', name)
        if value:
            dst.setconfig(b"ui", name, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # selected local settings the remote ui also needs
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1519
1526
1520
1527
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of the repo attribute holding the base directory,
# file name relative to it) — see cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1530
1537
1531
1538
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused.

    Staleness is detected via a fingerprint of mtimes/sizes of a few
    well-known files (see module-level ``foi``), so reuse is cheap but
    only heuristically correct.
    """

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest + the newest mtime seen
        self._state, self.mtime = self._repostate()
        # remember the view so refreshed instances keep the same filtering
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(fingerprint, maxmtime)`` for the files of interest.

        ``fingerprint`` is a tuple of (mtime, size) pairs, one per entry
        in the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist; stat its directory instead so
                # the fingerprint still reflects changes there
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the fingerprint over so the copy is not considered stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now