##// END OF EJS Templates
peer-or-repo: move the object setup in its own function...
marmoute -
r50579:1e6c3736 default
parent child Browse files
Show More
@@ -1,1602 +1,1604
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def _local(path):
    """Return the repo module to use for a local *path*.

    Yields ``bundlerepo`` when *path* names a regular file (a bundle),
    otherwise ``localrepo``.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # os.stat() is used deliberately instead of os.path.isfile():
        # since Python 3.8 isfile() returns False on invalid paths, and
        # invalid paths need dedicated error reporting here.
        mode = os.stat(path).st_mode
        isfile = stat.S_ISREG(mode)
    except ValueError as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    if isfile:
        return bundlerepo
    return localrepo
86
86
87
87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against *other* and fold them into *revs*.

    Returns a ``(revs, checkout)`` pair where ``checkout`` is the first
    resolved revision (or None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass the revs through
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        # remote cannot resolve branch names; only a raw hash/branch works
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand a branch name to its heads; b'.' means the local
        # dirstate branch
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a known branch: treat it as a plain revision hash
        revs.append(hashbranch)
    return revs, revs[0]
130
130
131
131
# Map a URL scheme to the module (or callable) that knows how to open it.
# Anything without an explicit scheme falls back to b'file' handling.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
141
141
142
142
def _peerlookup(path):
    """Return the scheme handler for *path*.

    The handler is either a module exposing ``instance`` or a callable
    that maps the path to such a module.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
155
155
156
156
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the handler has no islocal(): assume remote
        return False
165
165
166
166
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
174
174
175
175
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
178
178
179
179
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    cls = _peerlookup(path)
    obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj


def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension/caller setup hooks on a freshly created repo or peer."""
    # prefer the object's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get an extra round of setup functions
        for f in wirepeersetupfuncs:
            f(ui, obj)
206
208
207
209
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        # a remote peer cannot be used where a local repo is required
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
231
233
232
234
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
239
241
240
242
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last path component, with trailing separators normalized away
    return os.path.basename(os.path.normpath(path))
261
263
262
264
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: there is no separate source
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached on a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
280
282
281
283
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # work out the destination directory
    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # source is a path/URL: open it and resolve requested branches
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the new repo picks up the share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
328
330
329
331
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first, then whatever the repo already had
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
350
352
351
353
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
398
400
399
401
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where future pulls/pushes should go by default
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
416
418
417
419
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested
        checkout = update
    # fall back through default and tip until something resolves
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
438
440
439
441
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos do not need phase data copied over
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
478
480
479
481
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
585
587
586
588
587 # Recomputing caches is often slow on big repos, so copy them.
589 # Recomputing caches is often slow on big repos, so copy them.
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if not os.path.exists(srcfname):
        # source cache absent: nothing to do
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, dstfname)
596
598
597
599
598 def clone(
600 def clone(
599 ui,
601 ui,
600 peeropts,
602 peeropts,
601 source,
603 source,
602 dest=None,
604 dest=None,
603 pull=False,
605 pull=False,
604 revs=None,
606 revs=None,
605 update=True,
607 update=True,
606 stream=False,
608 stream=False,
607 branch=None,
609 branch=None,
608 shareopts=None,
610 shareopts=None,
609 storeincludepats=None,
611 storeincludepats=None,
610 storeexcludepats=None,
612 storeexcludepats=None,
611 depth=None,
613 depth=None,
612 ):
614 ):
613 """Make a copy of an existing repository.
615 """Make a copy of an existing repository.
614
616
615 Create a copy of an existing repository in a new directory. The
617 Create a copy of an existing repository in a new directory. The
616 source and destination are URLs, as passed to the repository
618 source and destination are URLs, as passed to the repository
617 function. Returns a pair of repository peers, the source and
619 function. Returns a pair of repository peers, the source and
618 newly created destination.
620 newly created destination.
619
621
620 The location of the source is added to the new repository's
622 The location of the source is added to the new repository's
621 .hg/hgrc file, as the default to be used for future pulls and
623 .hg/hgrc file, as the default to be used for future pulls and
622 pushes.
624 pushes.
623
625
624 If an exception is raised, the partly cloned/updated destination
626 If an exception is raised, the partly cloned/updated destination
625 repository will be deleted.
627 repository will be deleted.
626
628
627 Arguments:
629 Arguments:
628
630
629 source: repository object or URL
631 source: repository object or URL
630
632
631 dest: URL of destination repository to create (defaults to base
633 dest: URL of destination repository to create (defaults to base
632 name of source repository)
634 name of source repository)
633
635
634 pull: always pull from source repository, even in local case or if the
636 pull: always pull from source repository, even in local case or if the
635 server prefers streaming
637 server prefers streaming
636
638
637 stream: stream raw data uncompressed from repository (fast over
639 stream: stream raw data uncompressed from repository (fast over
638 LAN, slow over WAN)
640 LAN, slow over WAN)
639
641
640 revs: revision to clone up to (implies pull=True)
642 revs: revision to clone up to (implies pull=True)
641
643
642 update: update working directory after clone completes, if
644 update: update working directory after clone completes, if
643 destination is local repository (True means update to default rev,
645 destination is local repository (True means update to default rev,
644 anything else is treated as a revision)
646 anything else is treated as a revision)
645
647
646 branch: branches to clone
648 branch: branches to clone
647
649
648 shareopts: dict of options to control auto sharing behavior. The "pool" key
650 shareopts: dict of options to control auto sharing behavior. The "pool" key
649 activates auto sharing mode and defines the directory for stores. The
651 activates auto sharing mode and defines the directory for stores. The
650 "mode" key determines how to construct the directory name of the shared
652 "mode" key determines how to construct the directory name of the shared
651 repository. "identity" means the name is derived from the node of the first
653 repository. "identity" means the name is derived from the node of the first
652 changeset in the repository. "remote" means the name is derived from the
654 changeset in the repository. "remote" means the name is derived from the
653 remote's path/URL. Defaults to "identity."
655 remote's path/URL. Defaults to "identity."
654
656
655 storeincludepats and storeexcludepats: sets of file patterns to include and
657 storeincludepats and storeexcludepats: sets of file patterns to include and
656 exclude in the repository copy, respectively. If not defined, all files
658 exclude in the repository copy, respectively. If not defined, all files
657 will be included (a "full" clone). Otherwise a "narrow" clone containing
659 will be included (a "full" clone). Otherwise a "narrow" clone containing
658 only the requested files will be performed. If ``storeincludepats`` is not
660 only the requested files will be performed. If ``storeincludepats`` is not
659 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
661 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
660 ``path:.``. If both are empty sets, no files will be cloned.
662 ``path:.``. If both are empty sets, no files will be cloned.
661 """
663 """
662
664
663 if isinstance(source, bytes):
665 if isinstance(source, bytes):
664 src = urlutil.get_clone_path(ui, source, branch)
666 src = urlutil.get_clone_path(ui, source, branch)
665 origsource, source, branches = src
667 origsource, source, branches = src
666 srcpeer = peer(ui, peeropts, source)
668 srcpeer = peer(ui, peeropts, source)
667 else:
669 else:
668 srcpeer = source.peer() # in case we were called with a localrepo
670 srcpeer = source.peer() # in case we were called with a localrepo
669 branches = (None, branch or [])
671 branches = (None, branch or [])
670 origsource = source = srcpeer.url()
672 origsource = source = srcpeer.url()
671 srclock = destlock = destwlock = cleandir = None
673 srclock = destlock = destwlock = cleandir = None
672 destpeer = None
674 destpeer = None
673 try:
675 try:
674 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
676 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
675
677
676 if dest is None:
678 if dest is None:
677 dest = defaultdest(source)
679 dest = defaultdest(source)
678 if dest:
680 if dest:
679 ui.status(_(b"destination directory: %s\n") % dest)
681 ui.status(_(b"destination directory: %s\n") % dest)
680 else:
682 else:
681 dest = urlutil.get_clone_path(ui, dest)[0]
683 dest = urlutil.get_clone_path(ui, dest)[0]
682
684
683 dest = urlutil.urllocalpath(dest)
685 dest = urlutil.urllocalpath(dest)
684 source = urlutil.urllocalpath(source)
686 source = urlutil.urllocalpath(source)
685
687
686 if not dest:
688 if not dest:
687 raise error.InputError(_(b"empty destination path is not valid"))
689 raise error.InputError(_(b"empty destination path is not valid"))
688
690
689 destvfs = vfsmod.vfs(dest, expandpath=True)
691 destvfs = vfsmod.vfs(dest, expandpath=True)
690 if destvfs.lexists():
692 if destvfs.lexists():
691 if not destvfs.isdir():
693 if not destvfs.isdir():
692 raise error.InputError(
694 raise error.InputError(
693 _(b"destination '%s' already exists") % dest
695 _(b"destination '%s' already exists") % dest
694 )
696 )
695 elif destvfs.listdir():
697 elif destvfs.listdir():
696 raise error.InputError(
698 raise error.InputError(
697 _(b"destination '%s' is not empty") % dest
699 _(b"destination '%s' is not empty") % dest
698 )
700 )
699
701
700 createopts = {}
702 createopts = {}
701 narrow = False
703 narrow = False
702
704
703 if storeincludepats is not None:
705 if storeincludepats is not None:
704 narrowspec.validatepatterns(storeincludepats)
706 narrowspec.validatepatterns(storeincludepats)
705 narrow = True
707 narrow = True
706
708
707 if storeexcludepats is not None:
709 if storeexcludepats is not None:
708 narrowspec.validatepatterns(storeexcludepats)
710 narrowspec.validatepatterns(storeexcludepats)
709 narrow = True
711 narrow = True
710
712
711 if narrow:
713 if narrow:
712 # Include everything by default if only exclusion patterns defined.
714 # Include everything by default if only exclusion patterns defined.
713 if storeexcludepats and not storeincludepats:
715 if storeexcludepats and not storeincludepats:
714 storeincludepats = {b'path:.'}
716 storeincludepats = {b'path:.'}
715
717
716 createopts[b'narrowfiles'] = True
718 createopts[b'narrowfiles'] = True
717
719
718 if depth:
720 if depth:
719 createopts[b'shallowfilestore'] = True
721 createopts[b'shallowfilestore'] = True
720
722
721 if srcpeer.capable(b'lfs-serve'):
723 if srcpeer.capable(b'lfs-serve'):
722 # Repository creation honors the config if it disabled the extension, so
724 # Repository creation honors the config if it disabled the extension, so
723 # we can't just announce that lfs will be enabled. This check avoids
725 # we can't just announce that lfs will be enabled. This check avoids
724 # saying that lfs will be enabled, and then saying it's an unknown
726 # saying that lfs will be enabled, and then saying it's an unknown
725 # feature. The lfs creation option is set in either case so that a
727 # feature. The lfs creation option is set in either case so that a
726 # requirement is added. If the extension is explicitly disabled but the
728 # requirement is added. If the extension is explicitly disabled but the
727 # requirement is set, the clone aborts early, before transferring any
729 # requirement is set, the clone aborts early, before transferring any
728 # data.
730 # data.
729 createopts[b'lfs'] = True
731 createopts[b'lfs'] = True
730
732
731 if extensions.disabled_help(b'lfs'):
733 if extensions.disabled_help(b'lfs'):
732 ui.status(
734 ui.status(
733 _(
735 _(
734 b'(remote is using large file support (lfs), but it is '
736 b'(remote is using large file support (lfs), but it is '
735 b'explicitly disabled in the local configuration)\n'
737 b'explicitly disabled in the local configuration)\n'
736 )
738 )
737 )
739 )
738 else:
740 else:
739 ui.status(
741 ui.status(
740 _(
742 _(
741 b'(remote is using large file support (lfs); lfs will '
743 b'(remote is using large file support (lfs); lfs will '
742 b'be enabled for this repository)\n'
744 b'be enabled for this repository)\n'
743 )
745 )
744 )
746 )
745
747
746 shareopts = shareopts or {}
748 shareopts = shareopts or {}
747 sharepool = shareopts.get(b'pool')
749 sharepool = shareopts.get(b'pool')
748 sharenamemode = shareopts.get(b'mode')
750 sharenamemode = shareopts.get(b'mode')
749 if sharepool and islocal(dest):
751 if sharepool and islocal(dest):
750 sharepath = None
752 sharepath = None
751 if sharenamemode == b'identity':
753 if sharenamemode == b'identity':
752 # Resolve the name from the initial changeset in the remote
754 # Resolve the name from the initial changeset in the remote
753 # repository. This returns nullid when the remote is empty. It
755 # repository. This returns nullid when the remote is empty. It
754 # raises RepoLookupError if revision 0 is filtered or otherwise
756 # raises RepoLookupError if revision 0 is filtered or otherwise
755 # not available. If we fail to resolve, sharing is not enabled.
757 # not available. If we fail to resolve, sharing is not enabled.
756 try:
758 try:
757 with srcpeer.commandexecutor() as e:
759 with srcpeer.commandexecutor() as e:
758 rootnode = e.callcommand(
760 rootnode = e.callcommand(
759 b'lookup',
761 b'lookup',
760 {
762 {
761 b'key': b'0',
763 b'key': b'0',
762 },
764 },
763 ).result()
765 ).result()
764
766
765 if rootnode != sha1nodeconstants.nullid:
767 if rootnode != sha1nodeconstants.nullid:
766 sharepath = os.path.join(sharepool, hex(rootnode))
768 sharepath = os.path.join(sharepool, hex(rootnode))
767 else:
769 else:
768 ui.status(
770 ui.status(
769 _(
771 _(
770 b'(not using pooled storage: '
772 b'(not using pooled storage: '
771 b'remote appears to be empty)\n'
773 b'remote appears to be empty)\n'
772 )
774 )
773 )
775 )
774 except error.RepoLookupError:
776 except error.RepoLookupError:
775 ui.status(
777 ui.status(
776 _(
778 _(
777 b'(not using pooled storage: '
779 b'(not using pooled storage: '
778 b'unable to resolve identity of remote)\n'
780 b'unable to resolve identity of remote)\n'
779 )
781 )
780 )
782 )
781 elif sharenamemode == b'remote':
783 elif sharenamemode == b'remote':
782 sharepath = os.path.join(
784 sharepath = os.path.join(
783 sharepool, hex(hashutil.sha1(source).digest())
785 sharepool, hex(hashutil.sha1(source).digest())
784 )
786 )
785 else:
787 else:
786 raise error.Abort(
788 raise error.Abort(
787 _(b'unknown share naming mode: %s') % sharenamemode
789 _(b'unknown share naming mode: %s') % sharenamemode
788 )
790 )
789
791
790 # TODO this is a somewhat arbitrary restriction.
792 # TODO this is a somewhat arbitrary restriction.
791 if narrow:
793 if narrow:
792 ui.status(
794 ui.status(
793 _(b'(pooled storage not supported for narrow clones)\n')
795 _(b'(pooled storage not supported for narrow clones)\n')
794 )
796 )
795 sharepath = None
797 sharepath = None
796
798
797 if sharepath:
799 if sharepath:
798 return clonewithshare(
800 return clonewithshare(
799 ui,
801 ui,
800 peeropts,
802 peeropts,
801 sharepath,
803 sharepath,
802 source,
804 source,
803 srcpeer,
805 srcpeer,
804 dest,
806 dest,
805 pull=pull,
807 pull=pull,
806 rev=revs,
808 rev=revs,
807 update=update,
809 update=update,
808 stream=stream,
810 stream=stream,
809 )
811 )
810
812
811 srcrepo = srcpeer.local()
813 srcrepo = srcpeer.local()
812
814
813 abspath = origsource
815 abspath = origsource
814 if islocal(origsource):
816 if islocal(origsource):
815 abspath = util.abspath(urlutil.urllocalpath(origsource))
817 abspath = util.abspath(urlutil.urllocalpath(origsource))
816
818
817 if islocal(dest):
819 if islocal(dest):
818 if os.path.exists(dest):
820 if os.path.exists(dest):
819 # only clean up directories we create ourselves
821 # only clean up directories we create ourselves
820 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
822 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
821 cleandir = hgdir
823 cleandir = hgdir
822 else:
824 else:
823 cleandir = dest
825 cleandir = dest
824
826
825 copy = False
827 copy = False
826 if (
828 if (
827 srcrepo
829 srcrepo
828 and srcrepo.cancopy()
830 and srcrepo.cancopy()
829 and islocal(dest)
831 and islocal(dest)
830 and not phases.hassecret(srcrepo)
832 and not phases.hassecret(srcrepo)
831 ):
833 ):
832 copy = not pull and not revs
834 copy = not pull and not revs
833
835
834 # TODO this is a somewhat arbitrary restriction.
836 # TODO this is a somewhat arbitrary restriction.
835 if narrow:
837 if narrow:
836 copy = False
838 copy = False
837
839
838 if copy:
840 if copy:
839 try:
841 try:
840 # we use a lock here because if we race with commit, we
842 # we use a lock here because if we race with commit, we
841 # can end up with extra data in the cloned revlogs that's
843 # can end up with extra data in the cloned revlogs that's
842 # not pointed to by changesets, thus causing verify to
844 # not pointed to by changesets, thus causing verify to
843 # fail
845 # fail
844 srclock = srcrepo.lock(wait=False)
846 srclock = srcrepo.lock(wait=False)
845 except error.LockError:
847 except error.LockError:
846 copy = False
848 copy = False
847
849
848 if copy:
850 if copy:
849 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
851 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
850
852
851 destrootpath = urlutil.urllocalpath(dest)
853 destrootpath = urlutil.urllocalpath(dest)
852 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
854 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
853 localrepo.createrepository(
855 localrepo.createrepository(
854 ui,
856 ui,
855 destrootpath,
857 destrootpath,
856 requirements=dest_reqs,
858 requirements=dest_reqs,
857 )
859 )
858 destrepo = localrepo.makelocalrepository(ui, destrootpath)
860 destrepo = localrepo.makelocalrepository(ui, destrootpath)
859
861
860 destwlock = destrepo.wlock()
862 destwlock = destrepo.wlock()
861 destlock = destrepo.lock()
863 destlock = destrepo.lock()
862 from . import streamclone # avoid cycle
864 from . import streamclone # avoid cycle
863
865
864 streamclone.local_copy(srcrepo, destrepo)
866 streamclone.local_copy(srcrepo, destrepo)
865
867
866 # we need to re-init the repo after manually copying the data
868 # we need to re-init the repo after manually copying the data
867 # into it
869 # into it
868 destpeer = peer(srcrepo, peeropts, dest)
870 destpeer = peer(srcrepo, peeropts, dest)
869
871
870 # make the peer aware that it is already locked
872 # make the peer aware that it is already locked
871 #
873 #
872 # important:
874 # important:
873 #
875 #
874 # We still need to release that lock at the end of the function
876 # We still need to release that lock at the end of the function
875 destpeer.local()._lockref = weakref.ref(destlock)
877 destpeer.local()._lockref = weakref.ref(destlock)
876 destpeer.local()._wlockref = weakref.ref(destwlock)
878 destpeer.local()._wlockref = weakref.ref(destwlock)
877 # dirstate also needs to be copied because `_wlockref` has a reference
879 # dirstate also needs to be copied because `_wlockref` has a reference
878 # to it: this dirstate is saved to disk when the wlock is released
880 # to it: this dirstate is saved to disk when the wlock is released
879 destpeer.local().dirstate = destrepo.dirstate
881 destpeer.local().dirstate = destrepo.dirstate
880
882
881 srcrepo.hook(
883 srcrepo.hook(
882 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
884 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
883 )
885 )
884 else:
886 else:
885 try:
887 try:
886 # only pass ui when no srcrepo
888 # only pass ui when no srcrepo
887 destpeer = peer(
889 destpeer = peer(
888 srcrepo or ui,
890 srcrepo or ui,
889 peeropts,
891 peeropts,
890 dest,
892 dest,
891 create=True,
893 create=True,
892 createopts=createopts,
894 createopts=createopts,
893 )
895 )
894 except FileExistsError:
896 except FileExistsError:
895 cleandir = None
897 cleandir = None
896 raise error.Abort(_(b"destination '%s' already exists") % dest)
898 raise error.Abort(_(b"destination '%s' already exists") % dest)
897
899
898 if revs:
900 if revs:
899 if not srcpeer.capable(b'lookup'):
901 if not srcpeer.capable(b'lookup'):
900 raise error.Abort(
902 raise error.Abort(
901 _(
903 _(
902 b"src repository does not support "
904 b"src repository does not support "
903 b"revision lookup and so doesn't "
905 b"revision lookup and so doesn't "
904 b"support clone by revision"
906 b"support clone by revision"
905 )
907 )
906 )
908 )
907
909
908 # TODO this is batchable.
910 # TODO this is batchable.
909 remoterevs = []
911 remoterevs = []
910 for rev in revs:
912 for rev in revs:
911 with srcpeer.commandexecutor() as e:
913 with srcpeer.commandexecutor() as e:
912 remoterevs.append(
914 remoterevs.append(
913 e.callcommand(
915 e.callcommand(
914 b'lookup',
916 b'lookup',
915 {
917 {
916 b'key': rev,
918 b'key': rev,
917 },
919 },
918 ).result()
920 ).result()
919 )
921 )
920 revs = remoterevs
922 revs = remoterevs
921
923
922 checkout = revs[0]
924 checkout = revs[0]
923 else:
925 else:
924 revs = None
926 revs = None
925 local = destpeer.local()
927 local = destpeer.local()
926 if local:
928 if local:
927 if narrow:
929 if narrow:
928 with local.wlock(), local.lock():
930 with local.wlock(), local.lock():
929 local.setnarrowpats(storeincludepats, storeexcludepats)
931 local.setnarrowpats(storeincludepats, storeexcludepats)
930 narrowspec.copytoworkingcopy(local)
932 narrowspec.copytoworkingcopy(local)
931
933
932 u = urlutil.url(abspath)
934 u = urlutil.url(abspath)
933 defaulturl = bytes(u)
935 defaulturl = bytes(u)
934 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
936 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
935 if not stream:
937 if not stream:
936 if pull:
938 if pull:
937 stream = False
939 stream = False
938 else:
940 else:
939 stream = None
941 stream = None
940 # internal config: ui.quietbookmarkmove
942 # internal config: ui.quietbookmarkmove
941 overrides = {(b'ui', b'quietbookmarkmove'): True}
943 overrides = {(b'ui', b'quietbookmarkmove'): True}
942 with local.ui.configoverride(overrides, b'clone'):
944 with local.ui.configoverride(overrides, b'clone'):
943 exchange.pull(
945 exchange.pull(
944 local,
946 local,
945 srcpeer,
947 srcpeer,
946 heads=revs,
948 heads=revs,
947 streamclonerequested=stream,
949 streamclonerequested=stream,
948 includepats=storeincludepats,
950 includepats=storeincludepats,
949 excludepats=storeexcludepats,
951 excludepats=storeexcludepats,
950 depth=depth,
952 depth=depth,
951 )
953 )
952 elif srcrepo:
954 elif srcrepo:
953 # TODO lift restriction once exchange.push() accepts narrow
955 # TODO lift restriction once exchange.push() accepts narrow
954 # push.
956 # push.
955 if narrow:
957 if narrow:
956 raise error.Abort(
958 raise error.Abort(
957 _(
959 _(
958 b'narrow clone not available for '
960 b'narrow clone not available for '
959 b'remote destinations'
961 b'remote destinations'
960 )
962 )
961 )
963 )
962
964
963 exchange.push(
965 exchange.push(
964 srcrepo,
966 srcrepo,
965 destpeer,
967 destpeer,
966 revs=revs,
968 revs=revs,
967 bookmarks=srcrepo._bookmarks.keys(),
969 bookmarks=srcrepo._bookmarks.keys(),
968 )
970 )
969 else:
971 else:
970 raise error.Abort(
972 raise error.Abort(
971 _(b"clone from remote to remote not supported")
973 _(b"clone from remote to remote not supported")
972 )
974 )
973
975
974 cleandir = None
976 cleandir = None
975
977
976 destrepo = destpeer.local()
978 destrepo = destpeer.local()
977 if destrepo:
979 if destrepo:
978 template = uimod.samplehgrcs[b'cloned']
980 template = uimod.samplehgrcs[b'cloned']
979 u = urlutil.url(abspath)
981 u = urlutil.url(abspath)
980 u.passwd = None
982 u.passwd = None
981 defaulturl = bytes(u)
983 defaulturl = bytes(u)
982 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
984 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
983 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
985 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
984
986
985 if ui.configbool(b'experimental', b'remotenames'):
987 if ui.configbool(b'experimental', b'remotenames'):
986 logexchange.pullremotenames(destrepo, srcpeer)
988 logexchange.pullremotenames(destrepo, srcpeer)
987
989
988 if update:
990 if update:
989 if update is not True:
991 if update is not True:
990 with srcpeer.commandexecutor() as e:
992 with srcpeer.commandexecutor() as e:
991 checkout = e.callcommand(
993 checkout = e.callcommand(
992 b'lookup',
994 b'lookup',
993 {
995 {
994 b'key': update,
996 b'key': update,
995 },
997 },
996 ).result()
998 ).result()
997
999
998 uprev = None
1000 uprev = None
999 status = None
1001 status = None
1000 if checkout is not None:
1002 if checkout is not None:
1001 # Some extensions (at least hg-git and hg-subversion) have
1003 # Some extensions (at least hg-git and hg-subversion) have
1002 # a peer.lookup() implementation that returns a name instead
1004 # a peer.lookup() implementation that returns a name instead
1003 # of a nodeid. We work around it here until we've figured
1005 # of a nodeid. We work around it here until we've figured
1004 # out a better solution.
1006 # out a better solution.
1005 if len(checkout) == 20 and checkout in destrepo:
1007 if len(checkout) == 20 and checkout in destrepo:
1006 uprev = checkout
1008 uprev = checkout
1007 elif scmutil.isrevsymbol(destrepo, checkout):
1009 elif scmutil.isrevsymbol(destrepo, checkout):
1008 uprev = scmutil.revsymbol(destrepo, checkout).node()
1010 uprev = scmutil.revsymbol(destrepo, checkout).node()
1009 else:
1011 else:
1010 if update is not True:
1012 if update is not True:
1011 try:
1013 try:
1012 uprev = destrepo.lookup(update)
1014 uprev = destrepo.lookup(update)
1013 except error.RepoLookupError:
1015 except error.RepoLookupError:
1014 pass
1016 pass
1015 if uprev is None:
1017 if uprev is None:
1016 try:
1018 try:
1017 if destrepo._activebookmark:
1019 if destrepo._activebookmark:
1018 uprev = destrepo.lookup(destrepo._activebookmark)
1020 uprev = destrepo.lookup(destrepo._activebookmark)
1019 update = destrepo._activebookmark
1021 update = destrepo._activebookmark
1020 else:
1022 else:
1021 uprev = destrepo._bookmarks[b'@']
1023 uprev = destrepo._bookmarks[b'@']
1022 update = b'@'
1024 update = b'@'
1023 bn = destrepo[uprev].branch()
1025 bn = destrepo[uprev].branch()
1024 if bn == b'default':
1026 if bn == b'default':
1025 status = _(b"updating to bookmark %s\n" % update)
1027 status = _(b"updating to bookmark %s\n" % update)
1026 else:
1028 else:
1027 status = (
1029 status = (
1028 _(b"updating to bookmark %s on branch %s\n")
1030 _(b"updating to bookmark %s on branch %s\n")
1029 ) % (update, bn)
1031 ) % (update, bn)
1030 except KeyError:
1032 except KeyError:
1031 try:
1033 try:
1032 uprev = destrepo.branchtip(b'default')
1034 uprev = destrepo.branchtip(b'default')
1033 except error.RepoLookupError:
1035 except error.RepoLookupError:
1034 uprev = destrepo.lookup(b'tip')
1036 uprev = destrepo.lookup(b'tip')
1035 if not status:
1037 if not status:
1036 bn = destrepo[uprev].branch()
1038 bn = destrepo[uprev].branch()
1037 status = _(b"updating to branch %s\n") % bn
1039 status = _(b"updating to branch %s\n") % bn
1038 destrepo.ui.status(status)
1040 destrepo.ui.status(status)
1039 _update(destrepo, uprev)
1041 _update(destrepo, uprev)
1040 if update in destrepo._bookmarks:
1042 if update in destrepo._bookmarks:
1041 bookmarks.activate(destrepo, update)
1043 bookmarks.activate(destrepo, update)
1042 if destlock is not None:
1044 if destlock is not None:
1043 release(destlock)
1045 release(destlock)
1044 if destwlock is not None:
1046 if destwlock is not None:
1045 release(destlock)
1047 release(destlock)
1046 # here is a tiny window where someone could end up writing the
1048 # here is a tiny window where someone could end up writing the
1047 # repository before the caches are sure to be warm. This is "fine"
1049 # repository before the caches are sure to be warm. This is "fine"
1048 # as the only "bad" outcome would be some slowness. That potential
1050 # as the only "bad" outcome would be some slowness. That potential
1049 # slowness already affects readers.
1051 # slowness already affects readers.
1050 with destrepo.lock():
1052 with destrepo.lock():
1051 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1053 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1052 finally:
1054 finally:
1053 release(srclock, destlock, destwlock)
1055 release(srclock, destlock, destwlock)
1054 if cleandir is not None:
1056 if cleandir is not None:
1055 shutil.rmtree(cleandir, True)
1057 shutil.rmtree(cleandir, True)
1056 if srcpeer is not None:
1058 if srcpeer is not None:
1057 srcpeer.close()
1059 srcpeer.close()
1058 if destpeer and destpeer.local() is None:
1060 if destpeer and destpeer.local() is None:
1059 destpeer.close()
1061 destpeer.close()
1060 return srcpeer, destpeer
1062 return srcpeer, destpeer
1061
1063
1062
1064
1063 def _showstats(repo, stats, quietempty=False):
1065 def _showstats(repo, stats, quietempty=False):
1064 if quietempty and stats.isempty():
1066 if quietempty and stats.isempty():
1065 return
1067 return
1066 repo.ui.status(
1068 repo.ui.status(
1067 _(
1069 _(
1068 b"%d files updated, %d files merged, "
1070 b"%d files updated, %d files merged, "
1069 b"%d files removed, %d files unresolved\n"
1071 b"%d files removed, %d files unresolved\n"
1070 )
1072 )
1071 % (
1073 % (
1072 stats.updatedcount,
1074 stats.updatedcount,
1073 stats.mergedcount,
1075 stats.mergedcount,
1074 stats.removedcount,
1076 stats.removedcount,
1075 stats.unresolvedcount,
1077 stats.unresolvedcount,
1076 )
1078 )
1077 )
1079 )
1078
1080
1079
1081
1080 def updaterepo(repo, node, overwrite, updatecheck=None):
1082 def updaterepo(repo, node, overwrite, updatecheck=None):
1081 """Update the working directory to node.
1083 """Update the working directory to node.
1082
1084
1083 When overwrite is set, changes are clobbered, merged else
1085 When overwrite is set, changes are clobbered, merged else
1084
1086
1085 returns stats (see pydoc mercurial.merge.applyupdates)"""
1087 returns stats (see pydoc mercurial.merge.applyupdates)"""
1086 repo.ui.deprecwarn(
1088 repo.ui.deprecwarn(
1087 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1089 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1088 b'5.7',
1090 b'5.7',
1089 )
1091 )
1090 return mergemod._update(
1092 return mergemod._update(
1091 repo,
1093 repo,
1092 node,
1094 node,
1093 branchmerge=False,
1095 branchmerge=False,
1094 force=overwrite,
1096 force=overwrite,
1095 labels=[b'working copy', b'destination'],
1097 labels=[b'working copy', b'destination'],
1096 updatecheck=updatecheck,
1098 updatecheck=updatecheck,
1097 )
1099 )
1098
1100
1099
1101
1100 def update(repo, node, quietempty=False, updatecheck=None):
1102 def update(repo, node, quietempty=False, updatecheck=None):
1101 """update the working directory to node"""
1103 """update the working directory to node"""
1102 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1104 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1103 _showstats(repo, stats, quietempty)
1105 _showstats(repo, stats, quietempty)
1104 if stats.unresolvedcount:
1106 if stats.unresolvedcount:
1105 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1107 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1106 return stats.unresolvedcount > 0
1108 return stats.unresolvedcount > 0
1107
1109
1108
1110
1109 # naming conflict in clone()
1111 # naming conflict in clone()
1110 _update = update
1112 _update = update
1111
1113
1112
1114
1113 def clean(repo, node, show_stats=True, quietempty=False):
1115 def clean(repo, node, show_stats=True, quietempty=False):
1114 """forcibly switch the working directory to node, clobbering changes"""
1116 """forcibly switch the working directory to node, clobbering changes"""
1115 stats = mergemod.clean_update(repo[node])
1117 stats = mergemod.clean_update(repo[node])
1116 assert stats.unresolvedcount == 0
1118 assert stats.unresolvedcount == 0
1117 if show_stats:
1119 if show_stats:
1118 _showstats(repo, stats, quietempty)
1120 _showstats(repo, stats, quietempty)
1119 return False
1121 return False
1120
1122
1121
1123
1122 # naming conflict in updatetotally()
1124 # naming conflict in updatetotally()
1123 _clean = clean
1125 _clean = clean
1124
1126
1125 _VALID_UPDATECHECKS = {
1127 _VALID_UPDATECHECKS = {
1126 mergemod.UPDATECHECK_ABORT,
1128 mergemod.UPDATECHECK_ABORT,
1127 mergemod.UPDATECHECK_NONE,
1129 mergemod.UPDATECHECK_NONE,
1128 mergemod.UPDATECHECK_LINEAR,
1130 mergemod.UPDATECHECK_LINEAR,
1129 mergemod.UPDATECHECK_NO_CONFLICT,
1131 mergemod.UPDATECHECK_NO_CONFLICT,
1130 }
1132 }
1131
1133
1132
1134
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (caller-supplied) value must be valid, so re-check here
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly a bookmark
            # to move) from the repository state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                # the dirty-check already happened above, don't repeat it
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: deactivate any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1211
1213
1212
1214
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with ``ctx``, resolving what it can.

    Returns True when unresolved file conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1234
1236
1235
1237
def abortmerge(ui, repo):
    """Abandon an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts the mergestate records the local side of the merge;
    # without conflicts no mergestate was stored, so the working parent
    # is the revision to return to
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update must never leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1249
1251
1250
1252
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # incoming for a subrepository: rebase the subpath onto the source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be rebound below once getremotechanges() takes over the
    # peer; the finally block always calls whichever cleanup is current
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1312
1314
1313
1315
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally; return an exit code.

    The real work is delegated to _incoming(); this function only supplies
    the two callbacks it needs.
    """

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given; exit code
        # 0 as soon as any subrepo has incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show the incoming changesets, honoring --limit, --newest-first
        # and --no-merges
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1343
1345
1344
1346
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where outgoing_revs is the
    union of missing nodes sorted by local revision number and others is
    the list of still-open peers (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # outgoing for a subrepository: rebase the subpath onto dest
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # successfully used peers are handed back to the caller open
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1387
1389
1388
1390
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepository when --subrepos was requested.

    Returns 0 when any subrepo has outgoing changes, 1 otherwise.
    """
    status = 1
    if not opts.get(b'subrepos'):
        return status
    wctx = repo[None]
    for spath in sorted(wctx.substate):
        subrepo = wctx.sub(spath)
        status = min(status, subrepo.outgoing(ui, dests, opts))
    return status
1397
1399
1398
1400
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        # reverse in place, matching the original ordering behavior
        revs.reverse()
    if limit is None and not no_merges:
        # no filtering requested: pass every revision straight through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1420
1422
1421
1423
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s); return an exit code.

    Exit code is 0 when outgoing changes exist (here or in a subrepo),
    1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # --graph: render the outgoing set as an ASCII DAG
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # the peers returned by _outgoing() are still open; close them all
        for oth in others:
            oth.close()
1453
1455
1454
1456
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # fold each subrepo's verify result into ret; a
                        # failure in any subrepo taints the overall result
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself blew up: report and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1487
1489
1488
1490
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its base ui (dropping
        # repo-specific config), then read options from the repo's ui below
        remote_ui = src.baseui.copy()
        src = src.ui
    else:
        # assume src is already a global ui object: keep all global options
        remote_ui = src.copy()

    # copy ssh-specific options (command line wins over config)
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            remote_ui.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        remote_ui.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            remote_ui.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote_ui.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote_ui
1517
1519
1518
1520
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is a (repo attribute, file name) pair; the attribute names the
# base directory (store path or repo path) the file lives under.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1528
1530
1529
1531
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, plus
        # the newest mtime observed; fetch() uses these to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the same view (filter) the cached repo was using
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Collect (mtime, size) for each file of interest; when a file does
        # not exist, fall back to stat()ing its containing directory.
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the cached staleness snapshot over to the copy
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now