##// END OF EJS Templates
hg: show the correct message when cloning an LFS repo with extension disabled...
Matt Harbison -
r50670:7d6c8943 stable
parent child Browse files
Show More
@@ -1,1602 +1,1602 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# convenience alias: release a group of locks in one call
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66
66
67
67
def _local(path):
    """Return the repository module appropriate for a local ``path``.

    A path naming a regular file is assumed to be a bundle, so
    ``bundlerepo`` is returned for it; otherwise ``localrepo`` is
    returned.  Raises ``error.Abort`` for paths the OS rejects outright.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    except ValueError as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # nonexistent paths (and other stat failures) are not files
        isfile = False

    # conditional expression instead of the legacy `cond and a or b` idiom;
    # behavior is identical because isfile is always a bool here
    return bundlerepo if isfile else localrepo
86
86
87
87
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names into revision identifiers.

    ``branches`` is a ``(hashbranch, branchnames)`` pair.  Returns a
    ``(revs, checkoutrev)`` pair where ``revs`` is the list of revisions
    to operate on (may be None) and ``checkoutrev`` is the revision to
    check out afterwards (may be None).
    """
    # a courtesy to callers using a localrepo for other
    peer = other.peer()
    hashbranch, branches = branches

    # no branch information at all: hand the revs back untouched
    if not hashbranch and not branches:
        checkout = revs[0] if revs else None
        return revs or None, checkout

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # treat the lone hashbranch as a plain revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' names the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # a hashbranch that is not a known branch is assumed to be a revision
    if hashbranch and not primary(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
130
130
131
131
# map URL scheme -> handler (module or repo factory) for that scheme
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
141
141
142
142
def _peerlookup(path):
    """Return the scheme handler (module or repo factory) for ``path``."""
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    # unrecognized schemes fall back to plain file handling
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because 'handler' can be an
        # unloaded module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
155
155
156
156
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # a repo/peer object knows its own locality
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the handler for this scheme has no notion of locality
        return False
165
165
166
166
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    # local files are opened raw, in binary mode
    return util.posixfile(pathurl.localpath(), b'rb')
174
174
175
175
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
178
178
179
179
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    # the scheme handler knows how to instantiate a repo/peer for this path
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instance's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for presetup in presetupfuncs or []:
        presetup(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # remote peers additionally get the wire-protocol setup hooks
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
206
206
207
207
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        # a remote peer has no local repository to hand back
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
231
231
232
232
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # apply remote-specific ui options before instantiating
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
239
239
240
240
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # the last path component of the source, if any, names the clone
    return os.path.basename(os.path.normpath(path)) if path else b''
261
261
262
262
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    # reuse a previously memoized source repository
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourcepath)
    srcrepo = repository(repo.ui, srcurl)
    # memoize for future lookups
    repo.srcrepo = srcrepo
    return srcrepo
280
280
281
281
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # resolve the destination: derive it from the source when omitted
    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # re-open to pick up the configuration written by postshare
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
328
328
329
329
def _prependsourcehgrc(repo):
    """Copy the share source's config in front of this repo's .hg/hgrc.

    Used on unshare; only done when the share was performed with the
    share-safe method, where the source's config is shared with shares.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to carry over if the source has no config
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    # write source config first so the local config keeps precedence
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
350
350
351
351
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for forensics, but stop using it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    wctx = newrepo[b'.']
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
398
398
399
399
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record the source as the default path of the new share
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
416
416
417
417
418 def _postshareupdate(repo, update, checkout=None):
418 def _postshareupdate(repo, update, checkout=None):
419 """Maybe perform a working directory update after a shared repo is created.
419 """Maybe perform a working directory update after a shared repo is created.
420
420
421 ``update`` can be a boolean or a revision to update to.
421 ``update`` can be a boolean or a revision to update to.
422 """
422 """
423 if not update:
423 if not update:
424 return
424 return
425
425
426 repo.ui.status(_(b"updating working directory\n"))
426 repo.ui.status(_(b"updating working directory\n"))
427 if update is not True:
427 if update is not True:
428 checkout = update
428 checkout = update
429 for test in (checkout, b'default', b'tip'):
429 for test in (checkout, b'default', b'tip'):
430 if test is None:
430 if test is None:
431 continue
431 continue
432 try:
432 try:
433 uprev = repo.lookup(test)
433 uprev = repo.lookup(test)
434 break
434 break
435 except error.RepoLookupError:
435 except error.RepoLookupError:
436 continue
436 continue
437 _update(repo, uprev)
437 _update(repo, uprev)
438
438
439
439
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # NOTE(review): hardlink starts as None, so the topic is always
        # 'copying' until util.copyfiles reports hardlinking was used
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for fname in srcrepo.store.copylist():
                # publishing repos do not need phase data copied over
                if srcpublishing and fname.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(fname)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(fname):
                    if fname.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(fname), dstvfs.join(fname), hardlink, progress
                    )
                    copied += n
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
478
478
479
479
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for key in rev:
            with srcpeer.commandexecutor() as executor:
                remoterevs.append(
                    executor.callcommand(
                        b'lookup',
                        {
                            b'key': key,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
585
585
586
586
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    # Nothing to do when the source repo never built this cache.
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
596
596
597
597
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled.  This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature.  The lfs creation option is set in either case so that a
            # requirement is added.  If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            # never persist credentials in the generated hgrc
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # translate first, then interpolate, so the
                            # gettext catalog lookup can actually match
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = _(
                                b"updating to bookmark %s on branch %s\n"
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # release the working-directory lock too (this used to
                # mistakenly re-release destlock, keeping the wlock held
                # until the finally clause below)
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1061
1061
1062
1062
def _showstats(repo, stats, quietempty=False):
    """Print the standard post-update statistics line on repo's ui."""
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1078
1078
1079
1079
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point; steer callers toward the merge module.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    merge_args = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **merge_args)
1098
1098
1099
1099
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True means conflicts remain in the working directory
    return unresolved > 0
1107
1107
1108
1108
# naming conflict in clone(): its local variable `update` shadows the
# update() function above, so clone() calls it through this alias
_update = update
1111
1111
1112
1112
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    update_stats = mergemod.clean_update(repo[node])
    # a forced clean update can never leave unresolved merges behind
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    # mirror update()'s contract: False means no unresolved files
    return False
1120
1120
1121
1121
# naming conflict in updatetotally(): its parameter `clean` shadows the
# clean() function above, so it calls it through this alias
_clean = clean
1124
1124
# The update-check policies accepted by updatetotally(); any other
# configured value is replaced with UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1131
1131
1132
1132
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # An explicit caller-supplied value must be valid; this is a
        # programming error, hence ValueError rather than error.Abort.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: compute one (and possibly the
            # bookmark to move / activate) from the repo state.
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up-front on a dirty wdir, then update normally
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something other than a bookmark;
            # deactivate the current one, if any
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1211
1211
1212
1212
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with ``ctx``, resolving changes.

    Returns True when any unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1234
1234
1235
1235
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # With conflicts, mergestate records the local side to go back to;
    # with no conflicts, no mergestate was stored, so use the first parent.
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1249
1249
1250
1250
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns whatever subreporecurse() returns.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # Redirect to a subrepository: either an absolute URL, or a path
        # joined onto the parent source (os-style for local paths,
        # posix-style for remote URLs).
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be rebound below by getremotechanges(), which can wrap
    # the peer in a bundle repo with its own teardown.
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1312
1312
1313
1313
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not found locally; return exit code."""

    def subreporecurse():
        # Recurse into subrepos when --subrepos is given; an exit code of
        # 0 (changes found anywhere) wins over 1.
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1343
1343
1344
1344
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes sorted by local revision number, and
    ``others`` is the list of still-open peers (the caller is responsible
    for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # Redirect to a subrepository: either an absolute URL, or a
            # path joined onto the parent destination (os-style for local
            # paths, posix-style for remote URLs).
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # successfully queried peers are handed back to the caller open
            others.append(other)
        except:  # re-raises
            # close the peer on any failure (including KeyboardInterrupt)
            # before propagating; peers in `others` stay open for reuse
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1387
1387
1388
1388
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when --subrepos is set; 0 (found) wins."""
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1397
1397
1398
1398
def _outgoing_filter(repo, revs, opts):
    """Apply revision filtering/ordering options for outgoing.

    Yields nodes from ``revs`` honouring --newest-first (reverses the
    list in place), --limit and --no-merges.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # nothing to filter: pass everything through unchanged
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1420
1420
1421
1421
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes were found (here or in a subrepo),
    1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks against every peer, even when nothing
        # was found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # the peers returned by _outgoing() are left open for us to close
        for oth in others:
            oth.close()
1453
1453
1454
1454
def verify(repo, level=None):
    """verify the consistency of a repository

    After the core verification, also verifies any subrepositories
    referenced from ``.hgsubstate`` history.  Returns the combined
    (non-zero on problems) verification result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a verify failure in one subrepo must not prevent
                        # checking the remaining ones; `or ret` keeps any
                        # earlier failure sticky
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself blew up: the .hgsubstate
                # content for this revision is unparseable
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1487
1487
1488
1488
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from a ui or repo.

    ``src`` may be a repository (its baseui is copied, dropping
    repo-specific configuration) or a plain ui object (copied as-is).
    Command line overrides in ``opts`` and selected local settings are
    carried over to the returned ui.
    """
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: start from the base ui so that
        # repo-specific config is dropped
        dst = src.baseui.copy()
        src = src.ui  # copy target options from repo
    else:
        # assume it's a global ui object: keep all global options
        dst = src.copy()

    # ssh-specific options come from the command line or local config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1517
1517
1518
1518
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute holding the directory, file
# name inside it); consumed by cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1528
1528
1529
1529
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, and
        # the newest mtime among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the same view (filter) the cached instance used
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) tuple per file of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (yet); fall back to its directory so
                # creation of the file later still changes the state
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the cached state over so the copy does not immediately
        # consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now