sparse: lock the store when updating requirements config...
Arseniy Alekseyev
r49326:b74ee41a stable
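
This commit takes the working-copy lock (`wlock`) in addition to the store lock during a local clone, so that requirements updates (such as the ones the sparse extension performs) never rewrite `.hg/requires` without holding the store lock. As a rough illustration of the locking discipline involved (a minimal sketch, not code from this commit: `repo` stands for any localrepo object and `b'my-feature'` is a placeholder requirement name):

    # Sketch: Mercurial's lock ordering when rewriting the requirements file.
    # The working-copy lock is always acquired before the store lock.
    with repo.wlock():  # protects the working copy / dirstate
        with repo.lock():  # protects the store, including .hg/requires
            repo.requirements.add(b'my-feature')  # placeholder requirement
            scmutil.writereporequirements(repo)  # persists .hg/requires

In the hunk below, `clone()` accordingly acquires `destwlock` alongside `destlock` on the local-copy path and hands both to the new peer as weak references, so each lock is still released exactly once at the end of the function.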
@@ -1,1591 +1,1608 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import os
 import shutil
 import stat
+import weakref

 from .i18n import _
 from .node import (
     hex,
     sha1nodeconstants,
     short,
 )
 from .pycompat import getattr

 from . import (
     bookmarks,
     bundlerepo,
     cmdutil,
     destutil,
     discovery,
     error,
     exchange,
     extensions,
     graphmod,
     httppeer,
     localrepo,
     lock,
     logcmdutil,
     logexchange,
     merge as mergemod,
     mergestate as mergestatemod,
     narrowspec,
     phases,
     requirements,
     scmutil,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )
 from .interfaces import repository as repositorymod
 from .utils import (
     hashutil,
     stringutil,
     urlutil,
 )


 release = lock.release

 # shared features
 sharedbookmarks = b'bookmarks'


 def _local(path):
     path = util.expandpath(urlutil.urllocalpath(path))

     try:
         # we use os.stat() directly here instead of os.path.isfile()
         # because the latter started returning `False` on invalid path
         # exceptions starting in 3.8 and we care about handling
         # invalid paths specially here.
         st = os.stat(path)
         isfile = stat.S_ISREG(st.st_mode)
     # Python 2 raises TypeError, Python 3 ValueError.
     except (TypeError, ValueError) as e:
         raise error.Abort(
             _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
         )
     except OSError:
         isfile = False

     return isfile and bundlerepo or localrepo


 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer()  # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
         if revs:
             y = revs[0]
         else:
             y = None
         return x, y
     if revs:
         revs = list(revs)
     else:
         revs = []

     if not peer.capable(b'branchmap'):
         if branches:
             raise error.Abort(_(b"remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]

     with peer.commandexecutor() as e:
         branchmap = e.callcommand(b'branchmap', {}).result()

     def primary(branch):
         if branch == b'.':
             if not lrepo:
                 raise error.Abort(_(b"dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False

     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]


 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''
     msg = b'parseurl(...) moved to mercurial.utils.urlutil'
     util.nouideprecwarn(msg, b'6.0', stacklevel=2)
     return urlutil.parseurl(path, branches=branches)


 schemes = {
     b'bundle': bundlerepo,
     b'union': unionrepo,
     b'file': _local,
     b'http': httppeer,
     b'https': httppeer,
     b'ssh': sshpeer,
     b'static-http': statichttprepo,
 }


 def _peerlookup(path):
     u = urlutil.url(path)
     scheme = u.scheme or b'file'
     thing = schemes.get(scheme) or schemes[b'file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
         if not util.safehasattr(thing, b'instance'):
             raise
         return thing


 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()


 def openpath(ui, path, sendaccept=True):
     '''open path with open if local, url.open if remote'''
     pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
         return util.posixfile(pathurl.localpath(), b'rb')
     else:
         return url.open(ui, path, sendaccept=sendaccept)


 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []


 def _peerorrepo(
     ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
 ):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(
         ui, path, create, intents=intents, createopts=createopts
     )
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
     ui.log(b'extension', b'- executing reposetup hooks\n')
     with util.timedcm('all reposetup') as allreposetupstats:
         for name, module in extensions.extensions(ui):
             ui.log(b'extension', b'  - running reposetup for %s\n', name)
             hook = getattr(module, 'reposetup', None)
             if hook:
                 with util.timedcm('reposetup %r', name) as stats:
                     hook(ui, obj)
                 ui.log(
                     b'extension', b'  > reposetup for %s took %s\n', name, stats
                 )
     ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj


 def repository(
     ui,
     path=b'',
     create=False,
     presetupfuncs=None,
     intents=None,
     createopts=None,
 ):
     """return a repository object for the specified path"""
     peer = _peerorrepo(
         ui,
         path,
         create,
         presetupfuncs=presetupfuncs,
         intents=intents,
         createopts=createopts,
     )
     repo = peer.local()
     if not repo:
         raise error.Abort(
             _(b"repository '%s' is not local") % (path or peer.url())
         )
     return repo.filtered(b'visible')


 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(
         rui, path, create, intents=intents, createopts=createopts
     ).peer()


 def defaultdest(source):
     """return default destination of clone if none is given

     >>> defaultdest(b'foo')
     'foo'
     >>> defaultdest(b'/foo/bar')
     'bar'
     >>> defaultdest(b'/')
     ''
     >>> defaultdest(b'')
     ''
     >>> defaultdest(b'http://example.org/')
     ''
     >>> defaultdest(b'http://example.org/foo/')
     'foo'
     """
     path = urlutil.url(source).path
     if not path:
         return b''
     return os.path.basename(os.path.normpath(path))


 def sharedreposource(repo):
     """Returns repository object for source repository of a shared repo.

     If repo is not a shared repository, returns None.
     """
     if repo.sharedpath == repo.path:
         return None

     if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
         return repo.srcrepo

     # the sharedpath always ends in the .hg; we want the path to the repo
     source = repo.vfs.split(repo.sharedpath)[0]
     srcurl, branches = urlutil.parseurl(source)
     srcrepo = repository(repo.ui, srcurl)
     repo.srcrepo = srcrepo
     return srcrepo


 def share(
     ui,
     source,
     dest=None,
     update=True,
     bookmarks=True,
     defaultpath=None,
     relative=False,
 ):
     '''create a shared repository'''

     if not islocal(source):
         raise error.Abort(_(b'can only share local repositories'))

     if not dest:
         dest = defaultdest(source)
     else:
         dest = urlutil.get_clone_path(ui, dest)[1]

     if isinstance(source, bytes):
         origsource, source, branches = urlutil.get_clone_path(ui, source)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         checkout = None

     shareditems = set()
     if bookmarks:
         shareditems.add(sharedbookmarks)

     r = repository(
         ui,
         dest,
         create=True,
         createopts={
             b'sharedrepo': srcrepo,
             b'sharedrelative': relative,
             b'shareditems': shareditems,
         },
     )

     postshare(srcrepo, r, defaultpath=defaultpath)
     r = repository(ui, dest)
     _postshareupdate(r, update, checkout=checkout)
     return r


 def _prependsourcehgrc(repo):
     """copies the source repo config and prepends it to the current repo
     .hg/hgrc on unshare. This is only done if the share was performed using
     the share-safe method, where we share the config of the source in shares"""
     srcvfs = vfsmod.vfs(repo.sharedpath)
     dstvfs = vfsmod.vfs(repo.path)

     if not srcvfs.exists(b'hgrc'):
         return

     currentconfig = b''
     if dstvfs.exists(b'hgrc'):
         currentconfig = dstvfs.read(b'hgrc')

     with dstvfs(b'hgrc', b'wb') as fp:
         sourceconfig = srcvfs.read(b'hgrc')
         fp.write(b"# Config copied from shared source\n")
         fp.write(sourceconfig)
         fp.write(b'\n')
         fp.write(currentconfig)


 def unshare(ui, repo):
     """convert a shared repository to a normal one

     Copy the store data to the repo and remove the sharedpath data.

     Returns a new repository object representing the unshared repository.

     The passed repository object is not usable after this function is
     called.
     """

     with repo.lock():
         # we use locks here because if we race with commit, we
         # can end up with extra data in the cloned revlogs that's
         # not pointed to by changesets, thus causing verify to
         # fail
         destlock = copystore(ui, repo, repo.path)
         with destlock or util.nullcontextmanager():
             if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                 # we were sharing .hg/hgrc of the share source with the current
                 # repo. We need to copy that while unsharing otherwise it can
                 # disable hooks and other checks
                 _prependsourcehgrc(repo)

             sharefile = repo.vfs.join(b'sharedpath')
             util.rename(sharefile, sharefile + b'.old')

             repo.requirements.discard(requirements.SHARED_REQUIREMENT)
             repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
             scmutil.writereporequirements(repo)

     # Removing share changes some fundamental properties of the repo instance.
     # So we instantiate a new repo object and operate on it rather than
     # try to keep the existing repo usable.
     newrepo = repository(repo.baseui, repo.root, create=False)

     # TODO: figure out how to access subrepos that exist, but were previously
     # removed from .hgsub
     c = newrepo[b'.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()

     localrepo.poisonrepository(repo)

     return newrepo


 def postshare(sourcerepo, destrepo, defaultpath=None):
     """Called after a new shared repo is created.

     The new repo only has a requirements file and pointer to the source.
     This function configures additional shared data.

     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
     default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
     if default:
         template = b'[paths]\ndefault = %s\n'
         destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
     if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
         with destrepo.wlock():
             narrowspec.copytoworkingcopy(destrepo)


 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.

     ``update`` can be a boolean or a revision to update to.
     """
     if not update:
         return

     repo.ui.status(_(b"updating working directory\n"))
     if update is not True:
         checkout = update
     for test in (checkout, b'default', b'tip'):
         if test is None:
             continue
         try:
             uprev = repo.lookup(test)
             break
         except error.RepoLookupError:
             continue
     _update(repo, uprev)


 def copystore(ui, srcrepo, destpath):
     """copy files from store of srcrepo in destpath

     returns destlock
     """
     destlock = None
     try:
         hardlink = None
         topic = _(b'linking') if hardlink else _(b'copying')
         with ui.makeprogress(topic, unit=_(b'files')) as progress:
             num = 0
             srcpublishing = srcrepo.publishing()
             srcvfs = vfsmod.vfs(srcrepo.sharedpath)
             dstvfs = vfsmod.vfs(destpath)
             for f in srcrepo.store.copylist():
                 if srcpublishing and f.endswith(b'phaseroots'):
                     continue
                 dstbase = os.path.dirname(f)
                 if dstbase and not dstvfs.exists(dstbase):
                     dstvfs.mkdir(dstbase)
                 if srcvfs.exists(f):
                     if f.endswith(b'data'):
                         # 'dstbase' may be empty (e.g. revlog format 0)
                         lockfile = os.path.join(dstbase, b"lock")
                         # lock to avoid premature writing to the target
                         destlock = lock.lock(dstvfs, lockfile)
                     hardlink, n = util.copyfiles(
                         srcvfs.join(f), dstvfs.join(f), hardlink, progress
                     )
                     num += n
             if hardlink:
                 ui.debug(b"linked %d files\n" % num)
             else:
                 ui.debug(b"copied %d files\n" % num)
         return destlock
     except:  # re-raises
         release(destlock)
         raise


 def clonewithshare(
     ui,
     peeropts,
     sharepath,
     source,
     srcpeer,
     dest,
     pull=False,
     rev=None,
     update=True,
     stream=False,
 ):
     """Perform a clone using a shared repo.

     The store for the repository will be located at <sharepath>/.hg. The
     specified revisions will be cloned or pulled from "source". A shared repo
     will be created at "dest" and a working copy will be created if "update" is
     True.
     """
     revs = None
     if rev:
         if not srcpeer.capable(b'lookup'):
             raise error.Abort(
                 _(
                     b"src repository does not support "
                     b"revision lookup and so doesn't "
                     b"support clone by revision"
                 )
             )

         # TODO this is batchable.
         remoterevs = []
         for r in rev:
             with srcpeer.commandexecutor() as e:
                 remoterevs.append(
                     e.callcommand(
                         b'lookup',
                         {
                             b'key': r,
                         },
                     ).result()
                 )
         revs = remoterevs

     # Obtain a lock before checking for or cloning the pooled repo otherwise
     # 2 clients may race creating or populating it.
     pooldir = os.path.dirname(sharepath)
     # lock class requires the directory to exist.
     try:
         util.makedir(pooldir, False)
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise

     poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)

     with lock.lock(poolvfs, b'%s.lock' % basename):
         if os.path.exists(sharepath):
             ui.status(
                 _(b'(sharing from existing pooled repository %s)\n') % basename
             )
         else:
             ui.status(
                 _(b'(sharing from new pooled repository %s)\n') % basename
             )
             # Always use pull mode because hardlinks in share mode don't work
             # well. Never update because working copies aren't necessary in
             # share mode.
             clone(
                 ui,
                 peeropts,
                 source,
                 dest=sharepath,
                 pull=True,
                 revs=rev,
                 update=False,
                 stream=stream,
             )

     # Resolve the value to put in [paths] section for the source.
     if islocal(source):
         defaultpath = util.abspath(urlutil.urllocalpath(source))
     else:
         defaultpath = source

     sharerepo = repository(ui, path=sharepath)
     destrepo = share(
         ui,
         sharerepo,
         dest=dest,
         update=False,
         bookmarks=False,
         defaultpath=defaultpath,
     )

     # We need to perform a pull against the dest repo to fetch bookmarks
     # and other non-store data that isn't shared by default. In the case of
     # non-existing shared repo, this means we pull from the remote twice. This
     # is a bit weird. But at the time it was implemented, there wasn't an easy
     # way to pull just non-changegroup data.
     exchange.pull(destrepo, srcpeer, heads=revs)

     _postshareupdate(destrepo, update)

     return srcpeer, peer(ui, peeropts, dest)


 # Recomputing caches is often slow on big repos, so copy them.
 def _copycache(srcrepo, dstcachedir, fname):
     """copy a cache from srcrepo to destcachedir (if it exists)"""
     srcfname = srcrepo.cachevfs.join(fname)
     dstfname = os.path.join(dstcachedir, fname)
     if os.path.exists(srcfname):
         if not os.path.exists(dstcachedir):
             os.mkdir(dstcachedir)
         util.copyfile(srcfname, dstfname)


 def clone(
     ui,
     peeropts,
     source,
     dest=None,
     pull=False,
     revs=None,
     update=True,
     stream=False,
     branch=None,
     shareopts=None,
     storeincludepats=None,
     storeexcludepats=None,
     depth=None,
 ):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository peers, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case or if the
     server prefers streaming

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     revs: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)

     branch: branches to clone

     shareopts: dict of options to control auto sharing behavior. The "pool" key
     activates auto sharing mode and defines the directory for stores. The
     "mode" key determines how to construct the directory name of the shared
     repository. "identity" means the name is derived from the node of the first
     changeset in the repository. "remote" means the name is derived from the
     remote's path/URL. Defaults to "identity."

     storeincludepats and storeexcludepats: sets of file patterns to include and
     exclude in the repository copy, respectively. If not defined, all files
     will be included (a "full" clone). Otherwise a "narrow" clone containing
     only the requested files will be performed. If ``storeincludepats`` is not
     defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
     ``path:.``. If both are empty sets, no files will be cloned.
     """

     if isinstance(source, bytes):
         src = urlutil.get_clone_path(ui, source, branch)
         origsource, source, branches = src
         srcpeer = peer(ui, peeropts, source)
     else:
         srcpeer = source.peer()  # in case we were called with a localrepo
         branches = (None, branch or [])
         origsource = source = srcpeer.url()
-    srclock = destlock = cleandir = None
+    srclock = destlock = destwlock = cleandir = None
     destpeer = None
     try:
         revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

         if dest is None:
             dest = defaultdest(source)
             if dest:
                 ui.status(_(b"destination directory: %s\n") % dest)
         else:
             dest = urlutil.get_clone_path(ui, dest)[0]

         dest = urlutil.urllocalpath(dest)
         source = urlutil.urllocalpath(source)

         if not dest:
             raise error.InputError(_(b"empty destination path is not valid"))

         destvfs = vfsmod.vfs(dest, expandpath=True)
         if destvfs.lexists():
             if not destvfs.isdir():
                 raise error.InputError(
                     _(b"destination '%s' already exists") % dest
                 )
             elif destvfs.listdir():
                 raise error.InputError(
                     _(b"destination '%s' is not empty") % dest
                 )

         createopts = {}
         narrow = False

         if storeincludepats is not None:
             narrowspec.validatepatterns(storeincludepats)
             narrow = True

         if storeexcludepats is not None:
             narrowspec.validatepatterns(storeexcludepats)
             narrow = True

         if narrow:
             # Include everything by default if only exclusion patterns defined.
             if storeexcludepats and not storeincludepats:
                 storeincludepats = {b'path:.'}

             createopts[b'narrowfiles'] = True

         if depth:
             createopts[b'shallowfilestore'] = True

         if srcpeer.capable(b'lfs-serve'):
             # Repository creation honors the config if it disabled the extension, so
             # we can't just announce that lfs will be enabled. This check avoids
             # saying that lfs will be enabled, and then saying it's an unknown
             # feature. The lfs creation option is set in either case so that a
             # requirement is added. If the extension is explicitly disabled but the
             # requirement is set, the clone aborts early, before transferring any
             # data.
             createopts[b'lfs'] = True

             if extensions.disabled_help(b'lfs'):
                 ui.status(
                     _(
                         b'(remote is using large file support (lfs), but it is '
                         b'explicitly disabled in the local configuration)\n'
                     )
                 )
             else:
                 ui.status(
                     _(
                         b'(remote is using large file support (lfs); lfs will '
                         b'be enabled for this repository)\n'
                     )
                 )

         shareopts = shareopts or {}
         sharepool = shareopts.get(b'pool')
         sharenamemode = shareopts.get(b'mode')
         if sharepool and islocal(dest):
             sharepath = None
             if sharenamemode == b'identity':
                 # Resolve the name from the initial changeset in the remote
                 # repository. This returns nullid when the remote is empty. It
                 # raises RepoLookupError if revision 0 is filtered or otherwise
                 # not available. If we fail to resolve, sharing is not enabled.
                 try:
                     with srcpeer.commandexecutor() as e:
                         rootnode = e.callcommand(
                             b'lookup',
                             {
                                 b'key': b'0',
                             },
                         ).result()

                     if rootnode != sha1nodeconstants.nullid:
                         sharepath = os.path.join(sharepool, hex(rootnode))
                     else:
                         ui.status(
                             _(
                                 b'(not using pooled storage: '
                                 b'remote appears to be empty)\n'
                             )
                         )
                 except error.RepoLookupError:
                     ui.status(
                         _(
                             b'(not using pooled storage: '
                             b'unable to resolve identity of remote)\n'
                         )
                     )
             elif sharenamemode == b'remote':
                 sharepath = os.path.join(
                     sharepool, hex(hashutil.sha1(source).digest())
                 )
             else:
                 raise error.Abort(
                     _(b'unknown share naming mode: %s') % sharenamemode
                 )

             # TODO this is a somewhat arbitrary restriction.
             if narrow:
                 ui.status(
                     _(b'(pooled storage not supported for narrow clones)\n')
                 )
                 sharepath = None

             if sharepath:
                 return clonewithshare(
                     ui,
                     peeropts,
                     sharepath,
                     source,
                     srcpeer,
                     dest,
                     pull=pull,
                     rev=revs,
                     update=update,
                     stream=stream,
                 )

         srcrepo = srcpeer.local()

         abspath = origsource
         if islocal(origsource):
             abspath = util.abspath(urlutil.urllocalpath(origsource))

         if islocal(dest):
             if os.path.exists(dest):
                 # only clean up directories we create ourselves
                 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                 cleandir = hgdir
             else:
                 cleandir = dest

         copy = False
         if (
             srcrepo
             and srcrepo.cancopy()
             and islocal(dest)
             and not phases.hassecret(srcrepo)
         ):
             copy = not pull and not revs

         # TODO this is a somewhat arbitrary restriction.
         if narrow:
             copy = False

         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False

         if copy:
             srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

             destrootpath = urlutil.urllocalpath(dest)
             dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
             localrepo.createrepository(
                 ui,
                 destrootpath,
                 requirements=dest_reqs,
             )
             destrepo = localrepo.makelocalrepository(ui, destrootpath)
+
+            destwlock = destrepo.wlock()
             destlock = destrepo.lock()
             from . import streamclone  # avoid cycle

             streamclone.local_copy(srcrepo, destrepo)

             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
+
+            # make the peer aware that it is already locked
+            #
+            # important:
+            #
+            # We still need to release that lock at the end of the function
+            destpeer.local()._lockref = weakref.ref(destlock)
+            destpeer.local()._wlockref = weakref.ref(destwlock)
+            # dirstate also needs to be copied because `_wlockref` has a reference
+            # to it: this dirstate is saved to disk when the wlock is released
+            destpeer.local().dirstate = destrepo.dirstate
+
             srcrepo.hook(
                 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
             )
879 else:
894 else:
880 try:
895 try:
881 # only pass ui when no srcrepo
896 # only pass ui when no srcrepo
882 destpeer = peer(
897 destpeer = peer(
883 srcrepo or ui,
898 srcrepo or ui,
884 peeropts,
899 peeropts,
885 dest,
900 dest,
886 create=True,
901 create=True,
887 createopts=createopts,
902 createopts=createopts,
888 )
903 )
889 except OSError as inst:
904 except OSError as inst:
890 if inst.errno == errno.EEXIST:
905 if inst.errno == errno.EEXIST:
891 cleandir = None
906 cleandir = None
892 raise error.Abort(
907 raise error.Abort(
893 _(b"destination '%s' already exists") % dest
908 _(b"destination '%s' already exists") % dest
894 )
909 )
895 raise
910 raise
896
911
897 if revs:
912 if revs:
898 if not srcpeer.capable(b'lookup'):
913 if not srcpeer.capable(b'lookup'):
899 raise error.Abort(
914 raise error.Abort(
900 _(
915 _(
901 b"src repository does not support "
916 b"src repository does not support "
902 b"revision lookup and so doesn't "
917 b"revision lookup and so doesn't "
903 b"support clone by revision"
918 b"support clone by revision"
904 )
919 )
905 )
920 )
906
921
907 # TODO this is batchable.
922 # TODO this is batchable.
908 remoterevs = []
923 remoterevs = []
909 for rev in revs:
924 for rev in revs:
910 with srcpeer.commandexecutor() as e:
925 with srcpeer.commandexecutor() as e:
911 remoterevs.append(
926 remoterevs.append(
912 e.callcommand(
927 e.callcommand(
913 b'lookup',
928 b'lookup',
914 {
929 {
915 b'key': rev,
930 b'key': rev,
916 },
931 },
917 ).result()
932 ).result()
918 )
933 )
919 revs = remoterevs
934 revs = remoterevs
920
935
921 checkout = revs[0]
936 checkout = revs[0]
922 else:
937 else:
923 revs = None
938 revs = None
924 local = destpeer.local()
939 local = destpeer.local()
925 if local:
940 if local:
926 if narrow:
941 if narrow:
927 with local.wlock(), local.lock():
942 with local.wlock(), local.lock():
928 local.setnarrowpats(storeincludepats, storeexcludepats)
943 local.setnarrowpats(storeincludepats, storeexcludepats)
929 narrowspec.copytoworkingcopy(local)
944 narrowspec.copytoworkingcopy(local)
930
945
931 u = urlutil.url(abspath)
946 u = urlutil.url(abspath)
932 defaulturl = bytes(u)
947 defaulturl = bytes(u)
933 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
948 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
934 if not stream:
949 if not stream:
935 if pull:
950 if pull:
936 stream = False
951 stream = False
937 else:
952 else:
938 stream = None
953 stream = None
939 # internal config: ui.quietbookmarkmove
954 # internal config: ui.quietbookmarkmove
940 overrides = {(b'ui', b'quietbookmarkmove'): True}
955 overrides = {(b'ui', b'quietbookmarkmove'): True}
941 with local.ui.configoverride(overrides, b'clone'):
956 with local.ui.configoverride(overrides, b'clone'):
942 exchange.pull(
957 exchange.pull(
943 local,
958 local,
944 srcpeer,
959 srcpeer,
945 heads=revs,
960 heads=revs,
946 streamclonerequested=stream,
961 streamclonerequested=stream,
947 includepats=storeincludepats,
962 includepats=storeincludepats,
948 excludepats=storeexcludepats,
963 excludepats=storeexcludepats,
949 depth=depth,
964 depth=depth,
950 )
965 )
951 elif srcrepo:
966 elif srcrepo:
952 # TODO lift restriction once exchange.push() accepts narrow
967 # TODO lift restriction once exchange.push() accepts narrow
953 # push.
968 # push.
954 if narrow:
969 if narrow:
955 raise error.Abort(
970 raise error.Abort(
956 _(
971 _(
957 b'narrow clone not available for '
972 b'narrow clone not available for '
958 b'remote destinations'
973 b'remote destinations'
959 )
974 )
960 )
975 )
961
976
962 exchange.push(
977 exchange.push(
963 srcrepo,
978 srcrepo,
964 destpeer,
979 destpeer,
965 revs=revs,
980 revs=revs,
966 bookmarks=srcrepo._bookmarks.keys(),
981 bookmarks=srcrepo._bookmarks.keys(),
967 )
982 )
968 else:
983 else:
969 raise error.Abort(
984 raise error.Abort(
970 _(b"clone from remote to remote not supported")
985 _(b"clone from remote to remote not supported")
971 )
986 )
972
987
973 cleandir = None
988 cleandir = None
974
989
975 destrepo = destpeer.local()
990 destrepo = destpeer.local()
976 if destrepo:
991 if destrepo:
977 template = uimod.samplehgrcs[b'cloned']
992 template = uimod.samplehgrcs[b'cloned']
978 u = urlutil.url(abspath)
993 u = urlutil.url(abspath)
979 u.passwd = None
994 u.passwd = None
980 defaulturl = bytes(u)
995 defaulturl = bytes(u)
981 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
996 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
982 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
997 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
983
998
984 if ui.configbool(b'experimental', b'remotenames'):
999 if ui.configbool(b'experimental', b'remotenames'):
985 logexchange.pullremotenames(destrepo, srcpeer)
1000 logexchange.pullremotenames(destrepo, srcpeer)
986
1001
987 if update:
1002 if update:
988 if update is not True:
1003 if update is not True:
989 with srcpeer.commandexecutor() as e:
1004 with srcpeer.commandexecutor() as e:
990 checkout = e.callcommand(
1005 checkout = e.callcommand(
991 b'lookup',
1006 b'lookup',
992 {
1007 {
993 b'key': update,
1008 b'key': update,
994 },
1009 },
995 ).result()
1010 ).result()
996
1011
997 uprev = None
1012 uprev = None
998 status = None
1013 status = None
999 if checkout is not None:
1014 if checkout is not None:
1000 # Some extensions (at least hg-git and hg-subversion) have
1015 # Some extensions (at least hg-git and hg-subversion) have
1001 # a peer.lookup() implementation that returns a name instead
1016 # a peer.lookup() implementation that returns a name instead
1002 # of a nodeid. We work around it here until we've figured
1017 # of a nodeid. We work around it here until we've figured
1003 # out a better solution.
1018 # out a better solution.
1004 if len(checkout) == 20 and checkout in destrepo:
1019 if len(checkout) == 20 and checkout in destrepo:
1005 uprev = checkout
1020 uprev = checkout
1006 elif scmutil.isrevsymbol(destrepo, checkout):
1021 elif scmutil.isrevsymbol(destrepo, checkout):
1007 uprev = scmutil.revsymbol(destrepo, checkout).node()
1022 uprev = scmutil.revsymbol(destrepo, checkout).node()
1008 else:
1023 else:
1009 if update is not True:
1024 if update is not True:
1010 try:
1025 try:
1011 uprev = destrepo.lookup(update)
1026 uprev = destrepo.lookup(update)
1012 except error.RepoLookupError:
1027 except error.RepoLookupError:
1013 pass
1028 pass
1014 if uprev is None:
1029 if uprev is None:
1015 try:
1030 try:
1016 if destrepo._activebookmark:
1031 if destrepo._activebookmark:
1017 uprev = destrepo.lookup(destrepo._activebookmark)
1032 uprev = destrepo.lookup(destrepo._activebookmark)
1018 update = destrepo._activebookmark
1033 update = destrepo._activebookmark
1019 else:
1034 else:
1020 uprev = destrepo._bookmarks[b'@']
1035 uprev = destrepo._bookmarks[b'@']
1021 update = b'@'
1036 update = b'@'
1022 bn = destrepo[uprev].branch()
1037 bn = destrepo[uprev].branch()
1023 if bn == b'default':
1038 if bn == b'default':
1024 status = _(b"updating to bookmark %s\n" % update)
1039 status = _(b"updating to bookmark %s\n" % update)
1025 else:
1040 else:
1026 status = (
1041 status = (
1027 _(b"updating to bookmark %s on branch %s\n")
1042 _(b"updating to bookmark %s on branch %s\n")
1028 ) % (update, bn)
1043 ) % (update, bn)
1029 except KeyError:
1044 except KeyError:
1030 try:
1045 try:
1031 uprev = destrepo.branchtip(b'default')
1046 uprev = destrepo.branchtip(b'default')
1032 except error.RepoLookupError:
1047 except error.RepoLookupError:
1033 uprev = destrepo.lookup(b'tip')
1048 uprev = destrepo.lookup(b'tip')
1034 if not status:
1049 if not status:
1035 bn = destrepo[uprev].branch()
1050 bn = destrepo[uprev].branch()
1036 status = _(b"updating to branch %s\n") % bn
1051 status = _(b"updating to branch %s\n") % bn
1037 destrepo.ui.status(status)
1052 destrepo.ui.status(status)
1038 _update(destrepo, uprev)
1053 _update(destrepo, uprev)
1039 if update in destrepo._bookmarks:
1054 if update in destrepo._bookmarks:
1040 bookmarks.activate(destrepo, update)
1055 bookmarks.activate(destrepo, update)
1041 if destlock is not None:
1056 if destlock is not None:
1042 release(destlock)
1057 release(destlock)
1058 if destwlock is not None:
1059 release(destwlock)
1043 # here is a tiny window where someone could end up writing to the
1060 # here is a tiny window where someone could end up writing to the
1044 # repository before the caches are sure to be warm. This is "fine"
1061 # repository before the caches are sure to be warm. This is "fine"
1045 # as the only "bad" outcome would be some slowness. That potential
1062 # as the only "bad" outcome would be some slowness. That potential
1046 # slowness already affects readers.
1063 # slowness already affects readers.
1047 with destrepo.lock():
1064 with destrepo.lock():
1048 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1065 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1049 finally:
1066 finally:
1050 release(srclock, destlock)
1067 release(srclock, destlock, destwlock)
1051 if cleandir is not None:
1068 if cleandir is not None:
1052 shutil.rmtree(cleandir, True)
1069 shutil.rmtree(cleandir, True)
1053 if srcpeer is not None:
1070 if srcpeer is not None:
1054 srcpeer.close()
1071 srcpeer.close()
1055 if destpeer and destpeer.local() is None:
1072 if destpeer and destpeer.local() is None:
1056 destpeer.close()
1073 destpeer.close()
1057 return srcpeer, destpeer
1074 return srcpeer, destpeer
1058
1075
1059
1076
1060 def _showstats(repo, stats, quietempty=False):
1077 def _showstats(repo, stats, quietempty=False):
1061 if quietempty and stats.isempty():
1078 if quietempty and stats.isempty():
1062 return
1079 return
1063 repo.ui.status(
1080 repo.ui.status(
1064 _(
1081 _(
1065 b"%d files updated, %d files merged, "
1082 b"%d files updated, %d files merged, "
1066 b"%d files removed, %d files unresolved\n"
1083 b"%d files removed, %d files unresolved\n"
1067 )
1084 )
1068 % (
1085 % (
1069 stats.updatedcount,
1086 stats.updatedcount,
1070 stats.mergedcount,
1087 stats.mergedcount,
1071 stats.removedcount,
1088 stats.removedcount,
1072 stats.unresolvedcount,
1089 stats.unresolvedcount,
1073 )
1090 )
1074 )
1091 )
1075
1092
1076
1093
1077 def updaterepo(repo, node, overwrite, updatecheck=None):
1094 def updaterepo(repo, node, overwrite, updatecheck=None):
1078 """Update the working directory to node.
1095 """Update the working directory to node.
1079
1096
1080 When overwrite is set, changes are clobbered; otherwise they are merged
1097 When overwrite is set, changes are clobbered; otherwise they are merged
1081
1098
1082 returns stats (see pydoc mercurial.merge.applyupdates)"""
1099 returns stats (see pydoc mercurial.merge.applyupdates)"""
1083 repo.ui.deprecwarn(
1100 repo.ui.deprecwarn(
1084 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1101 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1085 b'5.7',
1102 b'5.7',
1086 )
1103 )
1087 return mergemod._update(
1104 return mergemod._update(
1088 repo,
1105 repo,
1089 node,
1106 node,
1090 branchmerge=False,
1107 branchmerge=False,
1091 force=overwrite,
1108 force=overwrite,
1092 labels=[b'working copy', b'destination'],
1109 labels=[b'working copy', b'destination'],
1093 updatecheck=updatecheck,
1110 updatecheck=updatecheck,
1094 )
1111 )
1095
1112
1096
1113
1097 def update(repo, node, quietempty=False, updatecheck=None):
1114 def update(repo, node, quietempty=False, updatecheck=None):
1098 """update the working directory to node"""
1115 """update the working directory to node"""
1099 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1116 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1100 _showstats(repo, stats, quietempty)
1117 _showstats(repo, stats, quietempty)
1101 if stats.unresolvedcount:
1118 if stats.unresolvedcount:
1102 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1119 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1103 return stats.unresolvedcount > 0
1120 return stats.unresolvedcount > 0
1104
1121
1105
1122
1106 # naming conflict in clone()
1123 # naming conflict in clone()
1107 _update = update
1124 _update = update
1108
1125
1109
1126
1110 def clean(repo, node, show_stats=True, quietempty=False):
1127 def clean(repo, node, show_stats=True, quietempty=False):
1111 """forcibly switch the working directory to node, clobbering changes"""
1128 """forcibly switch the working directory to node, clobbering changes"""
1112 stats = mergemod.clean_update(repo[node])
1129 stats = mergemod.clean_update(repo[node])
1113 assert stats.unresolvedcount == 0
1130 assert stats.unresolvedcount == 0
1114 if show_stats:
1131 if show_stats:
1115 _showstats(repo, stats, quietempty)
1132 _showstats(repo, stats, quietempty)
1116 return False
1133 return False
1117
1134
1118
1135
1119 # naming conflict in updatetotally()
1136 # naming conflict in updatetotally()
1120 _clean = clean
1137 _clean = clean
1121
1138
1122 _VALID_UPDATECHECKS = {
1139 _VALID_UPDATECHECKS = {
1123 mergemod.UPDATECHECK_ABORT,
1140 mergemod.UPDATECHECK_ABORT,
1124 mergemod.UPDATECHECK_NONE,
1141 mergemod.UPDATECHECK_NONE,
1125 mergemod.UPDATECHECK_LINEAR,
1142 mergemod.UPDATECHECK_LINEAR,
1126 mergemod.UPDATECHECK_NO_CONFLICT,
1143 mergemod.UPDATECHECK_NO_CONFLICT,
1127 }
1144 }
1128
1145
1129
1146
1130 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1147 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1131 """Update the working directory with extra care for non-file components
1148 """Update the working directory with extra care for non-file components
1132
1149
1133 This takes care of non-file components below:
1150 This takes care of non-file components below:
1134
1151
1135 :bookmark: might be advanced or (in)activated
1152 :bookmark: might be advanced or (in)activated
1136
1153
1137 This takes arguments below:
1154 This takes arguments below:
1138
1155
1139 :checkout: to which revision the working directory is updated
1156 :checkout: to which revision the working directory is updated
1140 :brev: a name, which might be a bookmark to be activated after updating
1157 :brev: a name, which might be a bookmark to be activated after updating
1141 :clean: whether changes in the working directory can be discarded
1158 :clean: whether changes in the working directory can be discarded
1142 :updatecheck: how to deal with a dirty working directory
1159 :updatecheck: how to deal with a dirty working directory
1143
1160
1144 Valid values for updatecheck are the UPDATECHECK_* constants
1161 Valid values for updatecheck are the UPDATECHECK_* constants
1145 defined in the merge module. Passing `None` will result in using the
1162 defined in the merge module. Passing `None` will result in using the
1146 configured default.
1163 configured default.
1147
1164
1148 * ABORT: abort if the working directory is dirty
1165 * ABORT: abort if the working directory is dirty
1149 * NONE: don't check (merge working directory changes into destination)
1166 * NONE: don't check (merge working directory changes into destination)
1150 * LINEAR: check that update is linear before merging working directory
1167 * LINEAR: check that update is linear before merging working directory
1151 changes into destination
1168 changes into destination
1152 * NO_CONFLICT: check that the update does not result in file merges
1169 * NO_CONFLICT: check that the update does not result in file merges
1153
1170
1154 This returns whether a conflict was detected during the update.
1171 This returns whether a conflict was detected during the update.
1155 """
1172 """
1156 if updatecheck is None:
1173 if updatecheck is None:
1157 updatecheck = ui.config(b'commands', b'update.check')
1174 updatecheck = ui.config(b'commands', b'update.check')
1158 if updatecheck not in _VALID_UPDATECHECKS:
1175 if updatecheck not in _VALID_UPDATECHECKS:
1159 # If not configured, or invalid value configured
1176 # If not configured, or invalid value configured
1160 updatecheck = mergemod.UPDATECHECK_LINEAR
1177 updatecheck = mergemod.UPDATECHECK_LINEAR
1161 if updatecheck not in _VALID_UPDATECHECKS:
1178 if updatecheck not in _VALID_UPDATECHECKS:
1162 raise ValueError(
1179 raise ValueError(
1163 r'Invalid updatecheck value %r (can accept %r)'
1180 r'Invalid updatecheck value %r (can accept %r)'
1164 % (updatecheck, _VALID_UPDATECHECKS)
1181 % (updatecheck, _VALID_UPDATECHECKS)
1165 )
1182 )
1166 with repo.wlock():
1183 with repo.wlock():
1167 movemarkfrom = None
1184 movemarkfrom = None
1168 warndest = False
1185 warndest = False
1169 if checkout is None:
1186 if checkout is None:
1170 updata = destutil.destupdate(repo, clean=clean)
1187 updata = destutil.destupdate(repo, clean=clean)
1171 checkout, movemarkfrom, brev = updata
1188 checkout, movemarkfrom, brev = updata
1172 warndest = True
1189 warndest = True
1173
1190
1174 if clean:
1191 if clean:
1175 ret = _clean(repo, checkout)
1192 ret = _clean(repo, checkout)
1176 else:
1193 else:
1177 if updatecheck == mergemod.UPDATECHECK_ABORT:
1194 if updatecheck == mergemod.UPDATECHECK_ABORT:
1178 cmdutil.bailifchanged(repo, merge=False)
1195 cmdutil.bailifchanged(repo, merge=False)
1179 updatecheck = mergemod.UPDATECHECK_NONE
1196 updatecheck = mergemod.UPDATECHECK_NONE
1180 ret = _update(repo, checkout, updatecheck=updatecheck)
1197 ret = _update(repo, checkout, updatecheck=updatecheck)
1181
1198
1182 if not ret and movemarkfrom:
1199 if not ret and movemarkfrom:
1183 if movemarkfrom == repo[b'.'].node():
1200 if movemarkfrom == repo[b'.'].node():
1184 pass # no-op update
1201 pass # no-op update
1185 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1202 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1186 b = ui.label(repo._activebookmark, b'bookmarks.active')
1203 b = ui.label(repo._activebookmark, b'bookmarks.active')
1187 ui.status(_(b"updating bookmark %s\n") % b)
1204 ui.status(_(b"updating bookmark %s\n") % b)
1188 else:
1205 else:
1189 # this can happen with a non-linear update
1206 # this can happen with a non-linear update
1190 b = ui.label(repo._activebookmark, b'bookmarks')
1207 b = ui.label(repo._activebookmark, b'bookmarks')
1191 ui.status(_(b"(leaving bookmark %s)\n") % b)
1208 ui.status(_(b"(leaving bookmark %s)\n") % b)
1192 bookmarks.deactivate(repo)
1209 bookmarks.deactivate(repo)
1193 elif brev in repo._bookmarks:
1210 elif brev in repo._bookmarks:
1194 if brev != repo._activebookmark:
1211 if brev != repo._activebookmark:
1195 b = ui.label(brev, b'bookmarks.active')
1212 b = ui.label(brev, b'bookmarks.active')
1196 ui.status(_(b"(activating bookmark %s)\n") % b)
1213 ui.status(_(b"(activating bookmark %s)\n") % b)
1197 bookmarks.activate(repo, brev)
1214 bookmarks.activate(repo, brev)
1198 elif brev:
1215 elif brev:
1199 if repo._activebookmark:
1216 if repo._activebookmark:
1200 b = ui.label(repo._activebookmark, b'bookmarks')
1217 b = ui.label(repo._activebookmark, b'bookmarks')
1201 ui.status(_(b"(leaving bookmark %s)\n") % b)
1218 ui.status(_(b"(leaving bookmark %s)\n") % b)
1202 bookmarks.deactivate(repo)
1219 bookmarks.deactivate(repo)
1203
1220
1204 if warndest:
1221 if warndest:
1205 destutil.statusotherdests(ui, repo)
1222 destutil.statusotherdests(ui, repo)
1206
1223
1207 return ret
1224 return ret
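# A hedged illustration (sketch only, not from the original module):
# assuming the constants above map to the usual config value strings
# (b'abort', b'none', b'linear', b'noconflict'), a user hgrc such as
#
#   [commands]
#   update.check = noconflict
#
# makes a call with updatecheck=None resolve to UPDATECHECK_NO_CONFLICT,
# while an unrecognized configured value silently falls back to
# UPDATECHECK_LINEAR as implemented above.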
1208
1225
1209
1226
1210 def merge(
1227 def merge(
1211 ctx,
1228 ctx,
1212 force=False,
1229 force=False,
1213 remind=True,
1230 remind=True,
1214 labels=None,
1231 labels=None,
1215 ):
1232 ):
1216 """Branch merge with node, resolving changes. Return true if any
1233 """Branch merge with node, resolving changes. Return true if any
1217 unresolved conflicts."""
1234 unresolved conflicts."""
1218 repo = ctx.repo()
1235 repo = ctx.repo()
1219 stats = mergemod.merge(ctx, force=force, labels=labels)
1236 stats = mergemod.merge(ctx, force=force, labels=labels)
1220 _showstats(repo, stats)
1237 _showstats(repo, stats)
1221 if stats.unresolvedcount:
1238 if stats.unresolvedcount:
1222 repo.ui.status(
1239 repo.ui.status(
1223 _(
1240 _(
1224 b"use 'hg resolve' to retry unresolved file merges "
1241 b"use 'hg resolve' to retry unresolved file merges "
1225 b"or 'hg merge --abort' to abandon\n"
1242 b"or 'hg merge --abort' to abandon\n"
1226 )
1243 )
1227 )
1244 )
1228 elif remind:
1245 elif remind:
1229 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1246 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1230 return stats.unresolvedcount > 0
1247 return stats.unresolvedcount > 0
1231
1248
1232
1249
1233 def abortmerge(ui, repo):
1250 def abortmerge(ui, repo):
1234 ms = mergestatemod.mergestate.read(repo)
1251 ms = mergestatemod.mergestate.read(repo)
1235 if ms.active():
1252 if ms.active():
1236 # there were conflicts
1253 # there were conflicts
1237 node = ms.localctx.hex()
1254 node = ms.localctx.hex()
1238 else:
1255 else:
1239 # there were no conflicts, mergestate was not stored
1256 # there were no conflicts, mergestate was not stored
1240 node = repo[b'.'].hex()
1257 node = repo[b'.'].hex()
1241
1258
1242 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1259 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1243 stats = mergemod.clean_update(repo[node])
1260 stats = mergemod.clean_update(repo[node])
1244 assert stats.unresolvedcount == 0
1261 assert stats.unresolvedcount == 0
1245 _showstats(repo, stats)
1262 _showstats(repo, stats)
1246
1263
1247
1264
1248 def _incoming(
1265 def _incoming(
1249 displaychlist,
1266 displaychlist,
1250 subreporecurse,
1267 subreporecurse,
1251 ui,
1268 ui,
1252 repo,
1269 repo,
1253 source,
1270 source,
1254 opts,
1271 opts,
1255 buffered=False,
1272 buffered=False,
1256 subpath=None,
1273 subpath=None,
1257 ):
1274 ):
1258 """
1275 """
1259 Helper for incoming / gincoming.
1276 Helper for incoming / gincoming.
1260 displaychlist gets called with
1277 displaychlist gets called with
1261 (remoterepo, incomingchangesetlist, displayer) parameters,
1278 (remoterepo, incomingchangesetlist, displayer) parameters,
1262 and is supposed to contain only code that can't be unified.
1279 and is supposed to contain only code that can't be unified.
1263 """
1280 """
1264 srcs = urlutil.get_pull_paths(repo, ui, [source])
1281 srcs = urlutil.get_pull_paths(repo, ui, [source])
1265 srcs = list(srcs)
1282 srcs = list(srcs)
1266 if len(srcs) != 1:
1283 if len(srcs) != 1:
1267 msg = _(b'for now, incoming supports only a single source, %d provided')
1284 msg = _(b'for now, incoming supports only a single source, %d provided')
1268 msg %= len(srcs)
1285 msg %= len(srcs)
1269 raise error.Abort(msg)
1286 raise error.Abort(msg)
1270 path = srcs[0]
1287 path = srcs[0]
1271 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1288 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1272 if subpath is not None:
1289 if subpath is not None:
1273 subpath = urlutil.url(subpath)
1290 subpath = urlutil.url(subpath)
1274 if subpath.isabs():
1291 if subpath.isabs():
1275 source = bytes(subpath)
1292 source = bytes(subpath)
1276 else:
1293 else:
1277 p = urlutil.url(source)
1294 p = urlutil.url(source)
1278 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1295 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1279 source = bytes(p)
1296 source = bytes(p)
1280 other = peer(repo, opts, source)
1297 other = peer(repo, opts, source)
1281 cleanupfn = other.close
1298 cleanupfn = other.close
1282 try:
1299 try:
1283 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1300 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1284 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1301 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1285
1302
1286 if revs:
1303 if revs:
1287 revs = [other.lookup(rev) for rev in revs]
1304 revs = [other.lookup(rev) for rev in revs]
1288 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1305 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1289 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1306 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1290 )
1307 )
1291
1308
1292 if not chlist:
1309 if not chlist:
1293 ui.status(_(b"no changes found\n"))
1310 ui.status(_(b"no changes found\n"))
1294 return subreporecurse()
1311 return subreporecurse()
1295 ui.pager(b'incoming')
1312 ui.pager(b'incoming')
1296 displayer = logcmdutil.changesetdisplayer(
1313 displayer = logcmdutil.changesetdisplayer(
1297 ui, other, opts, buffered=buffered
1314 ui, other, opts, buffered=buffered
1298 )
1315 )
1299 displaychlist(other, chlist, displayer)
1316 displaychlist(other, chlist, displayer)
1300 displayer.close()
1317 displayer.close()
1301 finally:
1318 finally:
1302 cleanupfn()
1319 cleanupfn()
1303 subreporecurse()
1320 subreporecurse()
1304 return 0 # exit code is zero since we found incoming changes
1321 return 0 # exit code is zero since we found incoming changes
1305
1322
1306
1323
1307 def incoming(ui, repo, source, opts, subpath=None):
1324 def incoming(ui, repo, source, opts, subpath=None):
1308 def subreporecurse():
1325 def subreporecurse():
1309 ret = 1
1326 ret = 1
1310 if opts.get(b'subrepos'):
1327 if opts.get(b'subrepos'):
1311 ctx = repo[None]
1328 ctx = repo[None]
1312 for subpath in sorted(ctx.substate):
1329 for subpath in sorted(ctx.substate):
1313 sub = ctx.sub(subpath)
1330 sub = ctx.sub(subpath)
1314 ret = min(ret, sub.incoming(ui, source, opts))
1331 ret = min(ret, sub.incoming(ui, source, opts))
1315 return ret
1332 return ret
1316
1333
1317 def display(other, chlist, displayer):
1334 def display(other, chlist, displayer):
1318 limit = logcmdutil.getlimit(opts)
1335 limit = logcmdutil.getlimit(opts)
1319 if opts.get(b'newest_first'):
1336 if opts.get(b'newest_first'):
1320 chlist.reverse()
1337 chlist.reverse()
1321 count = 0
1338 count = 0
1322 for n in chlist:
1339 for n in chlist:
1323 if limit is not None and count >= limit:
1340 if limit is not None and count >= limit:
1324 break
1341 break
1325 parents = [
1342 parents = [
1326 p for p in other.changelog.parents(n) if p != repo.nullid
1343 p for p in other.changelog.parents(n) if p != repo.nullid
1327 ]
1344 ]
1328 if opts.get(b'no_merges') and len(parents) == 2:
1345 if opts.get(b'no_merges') and len(parents) == 2:
1329 continue
1346 continue
1330 count += 1
1347 count += 1
1331 displayer.show(other[n])
1348 displayer.show(other[n])
1332
1349
1333 return _incoming(
1350 return _incoming(
1334 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1351 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1335 )
1352 )
1336
1353
1337
1354
1338 def _outgoing(ui, repo, dests, opts, subpath=None):
1355 def _outgoing(ui, repo, dests, opts, subpath=None):
1339 out = set()
1356 out = set()
1340 others = []
1357 others = []
1341 for path in urlutil.get_push_paths(repo, ui, dests):
1358 for path in urlutil.get_push_paths(repo, ui, dests):
1342 dest = path.pushloc or path.loc
1359 dest = path.pushloc or path.loc
1343 if subpath is not None:
1360 if subpath is not None:
1344 subpath = urlutil.url(subpath)
1361 subpath = urlutil.url(subpath)
1345 if subpath.isabs():
1362 if subpath.isabs():
1346 dest = bytes(subpath)
1363 dest = bytes(subpath)
1347 else:
1364 else:
1348 p = urlutil.url(dest)
1365 p = urlutil.url(dest)
1349 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1366 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1350 dest = bytes(p)
1367 dest = bytes(p)
1351 branches = path.branch, opts.get(b'branch') or []
1368 branches = path.branch, opts.get(b'branch') or []
1352
1369
1353 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1370 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1354 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1371 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1355 if revs:
1372 if revs:
1356 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1373 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1357
1374
1358 other = peer(repo, opts, dest)
1375 other = peer(repo, opts, dest)
1359 try:
1376 try:
1360 outgoing = discovery.findcommonoutgoing(
1377 outgoing = discovery.findcommonoutgoing(
1361 repo, other, revs, force=opts.get(b'force')
1378 repo, other, revs, force=opts.get(b'force')
1362 )
1379 )
1363 o = outgoing.missing
1380 o = outgoing.missing
1364 out.update(o)
1381 out.update(o)
1365 if not o:
1382 if not o:
1366 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1383 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1367 others.append(other)
1384 others.append(other)
1368 except: # re-raises
1385 except: # re-raises
1369 other.close()
1386 other.close()
1370 raise
1387 raise
1371 # make sure this is ordered by revision number
1388 # make sure this is ordered by revision number
1372 outgoing_revs = list(out)
1389 outgoing_revs = list(out)
1373 cl = repo.changelog
1390 cl = repo.changelog
1374 outgoing_revs.sort(key=cl.rev)
1391 outgoing_revs.sort(key=cl.rev)
1375 return outgoing_revs, others
1392 return outgoing_revs, others
1376
1393
1377
1394
1378 def _outgoing_recurse(ui, repo, dests, opts):
1395 def _outgoing_recurse(ui, repo, dests, opts):
1379 ret = 1
1396 ret = 1
1380 if opts.get(b'subrepos'):
1397 if opts.get(b'subrepos'):
1381 ctx = repo[None]
1398 ctx = repo[None]
1382 for subpath in sorted(ctx.substate):
1399 for subpath in sorted(ctx.substate):
1383 sub = ctx.sub(subpath)
1400 sub = ctx.sub(subpath)
1384 ret = min(ret, sub.outgoing(ui, dests, opts))
1401 ret = min(ret, sub.outgoing(ui, dests, opts))
1385 return ret
1402 return ret
1386
1403
1387
1404
1388 def _outgoing_filter(repo, revs, opts):
1405 def _outgoing_filter(repo, revs, opts):
1389 """apply revision filtering/ordering option for outgoing"""
1406 """apply revision filtering/ordering option for outgoing"""
1390 limit = logcmdutil.getlimit(opts)
1407 limit = logcmdutil.getlimit(opts)
1391 no_merges = opts.get(b'no_merges')
1408 no_merges = opts.get(b'no_merges')
1392 if opts.get(b'newest_first'):
1409 if opts.get(b'newest_first'):
1393 revs.reverse()
1410 revs.reverse()
1394 if limit is None and not no_merges:
1411 if limit is None and not no_merges:
1395 for r in revs:
1412 for r in revs:
1396 yield r
1413 yield r
1397 return
1414 return
1398
1415
1399 count = 0
1416 count = 0
1400 cl = repo.changelog
1417 cl = repo.changelog
1401 for n in revs:
1418 for n in revs:
1402 if limit is not None and count >= limit:
1419 if limit is not None and count >= limit:
1403 break
1420 break
1404 parents = [p for p in cl.parents(n) if p != repo.nullid]
1421 parents = [p for p in cl.parents(n) if p != repo.nullid]
1405 if no_merges and len(parents) == 2:
1422 if no_merges and len(parents) == 2:
1406 continue
1423 continue
1407 count += 1
1424 count += 1
1408 yield n
1425 yield n
1409
1426
1410
1427
1411 def outgoing(ui, repo, dests, opts, subpath=None):
1428 def outgoing(ui, repo, dests, opts, subpath=None):
1412 if opts.get(b'graph'):
1429 if opts.get(b'graph'):
1413 logcmdutil.checkunsupportedgraphflags([], opts)
1430 logcmdutil.checkunsupportedgraphflags([], opts)
1414 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1431 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1415 ret = 1
1432 ret = 1
1416 try:
1433 try:
1417 if o:
1434 if o:
1418 ret = 0
1435 ret = 0
1419
1436
1420 if opts.get(b'graph'):
1437 if opts.get(b'graph'):
1421 revdag = logcmdutil.graphrevs(repo, o, opts)
1438 revdag = logcmdutil.graphrevs(repo, o, opts)
1422 ui.pager(b'outgoing')
1439 ui.pager(b'outgoing')
1423 displayer = logcmdutil.changesetdisplayer(
1440 displayer = logcmdutil.changesetdisplayer(
1424 ui, repo, opts, buffered=True
1441 ui, repo, opts, buffered=True
1425 )
1442 )
1426 logcmdutil.displaygraph(
1443 logcmdutil.displaygraph(
1427 ui, repo, revdag, displayer, graphmod.asciiedges
1444 ui, repo, revdag, displayer, graphmod.asciiedges
1428 )
1445 )
1429 else:
1446 else:
1430 ui.pager(b'outgoing')
1447 ui.pager(b'outgoing')
1431 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1448 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1432 for n in _outgoing_filter(repo, o, opts):
1449 for n in _outgoing_filter(repo, o, opts):
1433 displayer.show(repo[n])
1450 displayer.show(repo[n])
1434 displayer.close()
1451 displayer.close()
1435 for oth in others:
1452 for oth in others:
1436 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1453 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1437 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1454 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1438 return ret # exit code is zero when outgoing changes were found
1455 return ret # exit code is zero when outgoing changes were found
1439 finally:
1456 finally:
1440 for oth in others:
1457 for oth in others:
1441 oth.close()
1458 oth.close()
1442
1459
1443
1460
1444 def verify(repo, level=None):
1461 def verify(repo, level=None):
1445 """verify the consistency of a repository"""
1462 """verify the consistency of a repository"""
1446 ret = verifymod.verify(repo, level=level)
1463 ret = verifymod.verify(repo, level=level)
1447
1464
1448 # Broken subrepo references in hidden csets don't seem worth worrying about,
1465 # Broken subrepo references in hidden csets don't seem worth worrying about,
1449 # since they can't be pushed/pulled, and --hidden can be used if they are a
1466 # since they can't be pushed/pulled, and --hidden can be used if they are a
1450 # concern.
1467 # concern.
1451
1468
1452 # pathto() is needed for -R case
1469 # pathto() is needed for -R case
1453 revs = repo.revs(
1470 revs = repo.revs(
1454 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1471 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1455 )
1472 )
1456
1473
1457 if revs:
1474 if revs:
1458 repo.ui.status(_(b'checking subrepo links\n'))
1475 repo.ui.status(_(b'checking subrepo links\n'))
1459 for rev in revs:
1476 for rev in revs:
1460 ctx = repo[rev]
1477 ctx = repo[rev]
1461 try:
1478 try:
1462 for subpath in ctx.substate:
1479 for subpath in ctx.substate:
1463 try:
1480 try:
1464 ret = (
1481 ret = (
1465 ctx.sub(subpath, allowcreate=False).verify() or ret
1482 ctx.sub(subpath, allowcreate=False).verify() or ret
1466 )
1483 )
1467 except error.RepoError as e:
1484 except error.RepoError as e:
1468 repo.ui.warn(b'%d: %s\n' % (rev, e))
1485 repo.ui.warn(b'%d: %s\n' % (rev, e))
1469 except Exception:
1486 except Exception:
1470 repo.ui.warn(
1487 repo.ui.warn(
1471 _(b'.hgsubstate is corrupt in revision %s\n')
1488 _(b'.hgsubstate is corrupt in revision %s\n')
1472 % short(ctx.node())
1489 % short(ctx.node())
1473 )
1490 )
1474
1491
1475 return ret
1492 return ret
1476
1493
1477
1494
1478 def remoteui(src, opts):
1495 def remoteui(src, opts):
1479 """build a remote ui from ui or repo and opts"""
1496 """build a remote ui from ui or repo and opts"""
1480 if util.safehasattr(src, b'baseui'): # looks like a repository
1497 if util.safehasattr(src, b'baseui'): # looks like a repository
1481 dst = src.baseui.copy() # drop repo-specific config
1498 dst = src.baseui.copy() # drop repo-specific config
1482 src = src.ui # copy target options from repo
1499 src = src.ui # copy target options from repo
1483 else: # assume it's a global ui object
1500 else: # assume it's a global ui object
1484 dst = src.copy() # keep all global options
1501 dst = src.copy() # keep all global options
1485
1502
1486 # copy ssh-specific options
1503 # copy ssh-specific options
1487 for o in b'ssh', b'remotecmd':
1504 for o in b'ssh', b'remotecmd':
1488 v = opts.get(o) or src.config(b'ui', o)
1505 v = opts.get(o) or src.config(b'ui', o)
1489 if v:
1506 if v:
1490 dst.setconfig(b"ui", o, v, b'copied')
1507 dst.setconfig(b"ui", o, v, b'copied')
1491
1508
1492 # copy bundle-specific options
1509 # copy bundle-specific options
1493 r = src.config(b'bundle', b'mainreporoot')
1510 r = src.config(b'bundle', b'mainreporoot')
1494 if r:
1511 if r:
1495 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1512 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1496
1513
1497 # copy selected local settings to the remote ui
1514 # copy selected local settings to the remote ui
1498 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1515 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1499 for key, val in src.configitems(sect):
1516 for key, val in src.configitems(sect):
1500 dst.setconfig(sect, key, val, b'copied')
1517 dst.setconfig(sect, key, val, b'copied')
1501 v = src.config(b'web', b'cacerts')
1518 v = src.config(b'web', b'cacerts')
1502 if v:
1519 if v:
1503 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1520 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1504
1521
1505 return dst
1522 return dst
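# Sketch (invented values, grounded in the copying logic above): calling
# remoteui(repo, {b'ssh': b'ssh -C'}) yields a ui where
# dst.config(b'ui', b'ssh') == b'ssh -C', with [auth], [hostfingerprints],
# [hostsecurity] and [http_proxy] items copied over, while other
# repo-specific configuration is dropped via baseui.copy().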
1506
1523
1507
1524
1508 # Files of interest
1525 # Files of interest
1509 # Used to check if the repository has changed looking at mtime and size of
1526 # Used to check if the repository has changed looking at mtime and size of
1510 # these files.
1527 # these files.
1511 foi = [
1528 foi = [
1512 (b'spath', b'00changelog.i'),
1529 (b'spath', b'00changelog.i'),
1513 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1530 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1514 (b'spath', b'obsstore'),
1531 (b'spath', b'obsstore'),
1515 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1532 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1516 ]
1533 ]
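# Shape sketch (timestamps and sizes invented): _repostate() below folds
# these files of interest into a tuple of (mtime, size) pairs, in foi
# order, e.g.
#
#   state == ((1650000000, 12345),  # 00changelog.i
#             (1650000000, 64),     # phaseroots
#             (1650000000, 0),      # obsstore
#             (1650000001, 120))    # bookmarks
#
# cachedlocalrepo.fetch() recreates its repository object whenever this
# tuple differs from the cached one.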
1517
1534
1518
1535
1519 class cachedlocalrepo(object):
1536 class cachedlocalrepo(object):
1520 """Holds a localrepository that can be cached and reused."""
1537 """Holds a localrepository that can be cached and reused."""
1521
1538
1522 def __init__(self, repo):
1539 def __init__(self, repo):
1523 """Create a new cached repo from an existing repo.
1540 """Create a new cached repo from an existing repo.
1524
1541
1525 We assume the passed in repo was recently created. If the
1542 We assume the passed in repo was recently created. If the
1526 repo has changed between when it was created and when it was
1543 repo has changed between when it was created and when it was
1527 turned into a cache, it may not refresh properly.
1544 turned into a cache, it may not refresh properly.
1528 """
1545 """
1529 assert isinstance(repo, localrepo.localrepository)
1546 assert isinstance(repo, localrepo.localrepository)
1530 self._repo = repo
1547 self._repo = repo
1531 self._state, self.mtime = self._repostate()
1548 self._state, self.mtime = self._repostate()
1532 self._filtername = repo.filtername
1549 self._filtername = repo.filtername
1533
1550
1534 def fetch(self):
1551 def fetch(self):
1535 """Refresh (if necessary) and return a repository.
1552 """Refresh (if necessary) and return a repository.
1536
1553
1537 If the cached instance is out of date, it will be recreated
1554 If the cached instance is out of date, it will be recreated
1538 automatically and returned.
1555 automatically and returned.
1539
1556
1540 Returns a tuple of the repo and a boolean indicating whether a new
1557 Returns a tuple of the repo and a boolean indicating whether a new
1541 repo instance was created.
1558 repo instance was created.
1542 """
1559 """
1543 # We compare the mtimes and sizes of some well-known files to
1560 # We compare the mtimes and sizes of some well-known files to
1544 # determine if the repo changed. This is not precise, as mtimes
1561 # determine if the repo changed. This is not precise, as mtimes
1545 # are susceptible to clock skew and imprecise filesystems and
1562 # are susceptible to clock skew and imprecise filesystems and
1546 # file content can change while maintaining the same size.
1563 # file content can change while maintaining the same size.
1547
1564
1548 state, mtime = self._repostate()
1565 state, mtime = self._repostate()
1549 if state == self._state:
1566 if state == self._state:
1550 return self._repo, False
1567 return self._repo, False
1551
1568
1552 repo = repository(self._repo.baseui, self._repo.url())
1569 repo = repository(self._repo.baseui, self._repo.url())
1553 if self._filtername:
1570 if self._filtername:
1554 self._repo = repo.filtered(self._filtername)
1571 self._repo = repo.filtered(self._filtername)
1555 else:
1572 else:
1556 self._repo = repo.unfiltered()
1573 self._repo = repo.unfiltered()
1557 self._state = state
1574 self._state = state
1558 self.mtime = mtime
1575 self.mtime = mtime
1559
1576
1560 return self._repo, True
1577 return self._repo, True
1561
1578
1562 def _repostate(self):
1579 def _repostate(self):
1563 state = []
1580 state = []
1564 maxmtime = -1
1581 maxmtime = -1
1565 for attr, fname in foi:
1582 for attr, fname in foi:
1566 prefix = getattr(self._repo, attr)
1583 prefix = getattr(self._repo, attr)
1567 p = os.path.join(prefix, fname)
1584 p = os.path.join(prefix, fname)
1568 try:
1585 try:
1569 st = os.stat(p)
1586 st = os.stat(p)
1570 except OSError:
1587 except OSError:
1571 st = os.stat(prefix)
1588 st = os.stat(prefix)
1572 state.append((st[stat.ST_MTIME], st.st_size))
1589 state.append((st[stat.ST_MTIME], st.st_size))
1573 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1590 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1574
1591
1575 return tuple(state), maxmtime
1592 return tuple(state), maxmtime
1576
1593
1577 def copy(self):
1594 def copy(self):
1578 """Obtain a copy of this class instance.
1595 """Obtain a copy of this class instance.
1579
1596
1580 A new localrepository instance is obtained. The new instance should be
1597 A new localrepository instance is obtained. The new instance should be
1581 completely independent of the original.
1598 completely independent of the original.
1582 """
1599 """
1583 repo = repository(self._repo.baseui, self._repo.origroot)
1600 repo = repository(self._repo.baseui, self._repo.origroot)
1584 if self._filtername:
1601 if self._filtername:
1585 repo = repo.filtered(self._filtername)
1602 repo = repo.filtered(self._filtername)
1586 else:
1603 else:
1587 repo = repo.unfiltered()
1604 repo = repo.unfiltered()
1588 c = cachedlocalrepo(repo)
1605 c = cachedlocalrepo(repo)
1589 c._state = self._state
1606 c._state = self._state
1590 c.mtime = self.mtime
1607 c.mtime = self.mtime
1591 return c
1608 return c
@@ -1,840 +1,840 b''
1 # sparse.py - functionality for sparse checkouts
1 # sparse.py - functionality for sparse checkouts
2 #
2 #
3 # Copyright 2014 Facebook, Inc.
3 # Copyright 2014 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import hex
13 from .node import hex
14 from . import (
14 from . import (
15 error,
15 error,
16 match as matchmod,
16 match as matchmod,
17 merge as mergemod,
17 merge as mergemod,
18 mergestate as mergestatemod,
18 mergestate as mergestatemod,
19 pathutil,
19 pathutil,
20 pycompat,
20 pycompat,
21 requirements,
21 requirements,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import hashutil
25 from .utils import hashutil
26
26
27
27
28 # Whether sparse features are enabled. This variable is intended to be
28 # Whether sparse features are enabled. This variable is intended to be
29 # temporary to facilitate porting sparse to core. It should eventually be
29 # temporary to facilitate porting sparse to core. It should eventually be
30 # a per-repo option, possibly a repo requirement.
30 # a per-repo option, possibly a repo requirement.
31 enabled = False
31 enabled = False
32
32
33
33
34 def parseconfig(ui, raw, action):
34 def parseconfig(ui, raw, action):
35 """Parse sparse config file content.
35 """Parse sparse config file content.
36
36
37 action is the command which is triggering this read; can be narrow or sparse
37 action is the command which is triggering this read; can be narrow or sparse
38
38
39 Returns a tuple of includes, excludes, and profiles.
39 Returns a tuple of includes, excludes, and profiles.
40 """
40 """
41 includes = set()
41 includes = set()
42 excludes = set()
42 excludes = set()
43 profiles = set()
43 profiles = set()
44 current = None
44 current = None
45 havesection = False
45 havesection = False
46
46
47 for line in raw.split(b'\n'):
47 for line in raw.split(b'\n'):
48 line = line.strip()
48 line = line.strip()
49 if not line or line.startswith(b'#'):
49 if not line or line.startswith(b'#'):
50 # empty or comment line, skip
50 # empty or comment line, skip
51 continue
51 continue
52 elif line.startswith(b'%include '):
52 elif line.startswith(b'%include '):
53 line = line[9:].strip()
53 line = line[9:].strip()
54 if line:
54 if line:
55 profiles.add(line)
55 profiles.add(line)
56 elif line == b'[include]':
56 elif line == b'[include]':
57 if havesection and current != includes:
57 if havesection and current != includes:
58 # TODO pass filename into this API so we can report it.
58 # TODO pass filename into this API so we can report it.
59 raise error.Abort(
59 raise error.Abort(
60 _(
60 _(
61 b'%(action)s config cannot have includes '
61 b'%(action)s config cannot have includes '
62 b'after excludes'
62 b'after excludes'
63 )
63 )
64 % {b'action': action}
64 % {b'action': action}
65 )
65 )
66 havesection = True
66 havesection = True
67 current = includes
67 current = includes
68 continue
68 continue
69 elif line == b'[exclude]':
69 elif line == b'[exclude]':
70 havesection = True
70 havesection = True
71 current = excludes
71 current = excludes
72 elif line:
72 elif line:
73 if current is None:
73 if current is None:
74 raise error.Abort(
74 raise error.Abort(
75 _(
75 _(
76 b'%(action)s config entry outside of '
76 b'%(action)s config entry outside of '
77 b'section: %(line)s'
77 b'section: %(line)s'
78 )
78 )
79 % {b'action': action, b'line': line},
79 % {b'action': action, b'line': line},
80 hint=_(
80 hint=_(
81 b'add an [include] or [exclude] line '
81 b'add an [include] or [exclude] line '
82 b'to declare the entry type'
82 b'to declare the entry type'
83 ),
83 ),
84 )
84 )
85
85
86 if line.strip().startswith(b'/'):
86 if line.strip().startswith(b'/'):
87 ui.warn(
87 ui.warn(
88 _(
88 _(
89 b'warning: %(action)s profile cannot use'
89 b'warning: %(action)s profile cannot use'
90 b' paths starting with /, ignoring %(line)s\n'
90 b' paths starting with /, ignoring %(line)s\n'
91 )
91 )
92 % {b'action': action, b'line': line}
92 % {b'action': action, b'line': line}
93 )
93 )
94 continue
94 continue
95 current.add(line)
95 current.add(line)
96
96
97 return includes, excludes, profiles
97 return includes, excludes, profiles
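# Illustrative sketch of the accepted format (file names invented):
#
#   raw = (
#       b'%include base.sparse\n'
#       b'[include]\n'
#       b'tools/\n'
#       b'[exclude]\n'
#       b'tools/tests/\n'
#   )
#
# parseconfig(ui, raw, b'sparse') would then return
#   ({b'tools/'}, {b'tools/tests/'}, {b'base.sparse'})
# whereas an entry before any section header, or an [include] section
# after an [exclude] section, raises error.Abort as implemented above.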
98
98
99
99
100 # Exists as separate function to facilitate monkeypatching.
100 # Exists as separate function to facilitate monkeypatching.
101 def readprofile(repo, profile, changeid):
101 def readprofile(repo, profile, changeid):
102 """Resolve the raw content of a sparse profile file."""
102 """Resolve the raw content of a sparse profile file."""
103 # TODO add some kind of cache here because this incurs a manifest
103 # TODO add some kind of cache here because this incurs a manifest
104 # resolve and can be slow.
104 # resolve and can be slow.
105 return repo.filectx(profile, changeid=changeid).data()
105 return repo.filectx(profile, changeid=changeid).data()
106
106
107
107
108 def patternsforrev(repo, rev):
108 def patternsforrev(repo, rev):
109 """Obtain sparse checkout patterns for the given rev.
109 """Obtain sparse checkout patterns for the given rev.
110
110
111 Returns a tuple of iterables representing includes, excludes, and
111 Returns a tuple of iterables representing includes, excludes, and
112 profiles.
112 profiles.
113 """
113 """
114 # Feature isn't enabled. No-op.
114 # Feature isn't enabled. No-op.
115 if not enabled:
115 if not enabled:
116 return set(), set(), set()
116 return set(), set(), set()
117
117
118 raw = repo.vfs.tryread(b'sparse')
118 raw = repo.vfs.tryread(b'sparse')
119 if not raw:
119 if not raw:
120 return set(), set(), set()
120 return set(), set(), set()
121
121
122 if rev is None:
122 if rev is None:
123 raise error.Abort(
123 raise error.Abort(
124 _(b'cannot parse sparse patterns from working directory')
124 _(b'cannot parse sparse patterns from working directory')
125 )
125 )
126
126
127 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
127 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
128 ctx = repo[rev]
128 ctx = repo[rev]
129
129
130 if profiles:
130 if profiles:
131 visited = set()
131 visited = set()
132 while profiles:
132 while profiles:
133 profile = profiles.pop()
133 profile = profiles.pop()
134 if profile in visited:
134 if profile in visited:
135 continue
135 continue
136
136
137 visited.add(profile)
137 visited.add(profile)
138
138
139 try:
139 try:
140 raw = readprofile(repo, profile, rev)
140 raw = readprofile(repo, profile, rev)
141 except error.ManifestLookupError:
141 except error.ManifestLookupError:
142 msg = (
142 msg = (
143 b"warning: sparse profile '%s' not found "
143 b"warning: sparse profile '%s' not found "
144 b"in rev %s - ignoring it\n" % (profile, ctx)
144 b"in rev %s - ignoring it\n" % (profile, ctx)
145 )
145 )
146 # experimental config: sparse.missingwarning
146 # experimental config: sparse.missingwarning
147 if repo.ui.configbool(b'sparse', b'missingwarning'):
147 if repo.ui.configbool(b'sparse', b'missingwarning'):
148 repo.ui.warn(msg)
148 repo.ui.warn(msg)
149 else:
149 else:
150 repo.ui.debug(msg)
150 repo.ui.debug(msg)
151 continue
151 continue
152
152
153 pincludes, pexcludes, subprofs = parseconfig(
153 pincludes, pexcludes, subprofs = parseconfig(
154 repo.ui, raw, b'sparse'
154 repo.ui, raw, b'sparse'
155 )
155 )
156 includes.update(pincludes)
156 includes.update(pincludes)
157 excludes.update(pexcludes)
157 excludes.update(pexcludes)
158 profiles.update(subprofs)
158 profiles.update(subprofs)
159
159
160 profiles = visited
160 profiles = visited
161
161
162 if includes:
162 if includes:
163 includes.add(b'.hg*')
163 includes.add(b'.hg*')
164
164
165 return includes, excludes, profiles
165 return includes, excludes, profiles
166
166
167
167
168 def activeconfig(repo):
168 def activeconfig(repo):
169 """Determine the active sparse config rules.
169 """Determine the active sparse config rules.
170
170
171 Rules are constructed by reading the current sparse config and bringing in
171 Rules are constructed by reading the current sparse config and bringing in
172 referenced profiles from parents of the working directory.
172 referenced profiles from parents of the working directory.
173 """
173 """
174 revs = [
174 revs = [
175 repo.changelog.rev(node)
175 repo.changelog.rev(node)
176 for node in repo.dirstate.parents()
176 for node in repo.dirstate.parents()
177 if node != repo.nullid
177 if node != repo.nullid
178 ]
178 ]
179
179
180 allincludes = set()
180 allincludes = set()
181 allexcludes = set()
181 allexcludes = set()
182 allprofiles = set()
182 allprofiles = set()
183
183
184 for rev in revs:
184 for rev in revs:
185 includes, excludes, profiles = patternsforrev(repo, rev)
185 includes, excludes, profiles = patternsforrev(repo, rev)
186 allincludes |= includes
186 allincludes |= includes
187 allexcludes |= excludes
187 allexcludes |= excludes
188 allprofiles |= profiles
188 allprofiles |= profiles
189
189
190 return allincludes, allexcludes, allprofiles
190 return allincludes, allexcludes, allprofiles
191
191
192
192
193 def configsignature(repo, includetemp=True):
193 def configsignature(repo, includetemp=True):
194 """Obtain the signature string for the current sparse configuration.
194 """Obtain the signature string for the current sparse configuration.
195
195
196 This is used to construct a cache key for matchers.
196 This is used to construct a cache key for matchers.
197 """
197 """
198 cache = repo._sparsesignaturecache
198 cache = repo._sparsesignaturecache
199
199
200 signature = cache.get(b'signature')
200 signature = cache.get(b'signature')
201
201
202 if includetemp:
202 if includetemp:
203 tempsignature = cache.get(b'tempsignature')
203 tempsignature = cache.get(b'tempsignature')
204 else:
204 else:
205 tempsignature = b'0'
205 tempsignature = b'0'
206
206
207 if signature is None or (includetemp and tempsignature is None):
207 if signature is None or (includetemp and tempsignature is None):
208 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
208 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
209 cache[b'signature'] = signature
209 cache[b'signature'] = signature
210
210
211 if includetemp:
211 if includetemp:
212 raw = repo.vfs.tryread(b'tempsparse')
212 raw = repo.vfs.tryread(b'tempsparse')
213 tempsignature = hex(hashutil.sha1(raw).digest())
213 tempsignature = hex(hashutil.sha1(raw).digest())
214 cache[b'tempsignature'] = tempsignature
214 cache[b'tempsignature'] = tempsignature
215
215
216 return b'%s %s' % (signature, tempsignature)
216 return b'%s %s' % (signature, tempsignature)
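# Shape sketch: the result is two sha1 hex digests separated by a space.
# For a repository with no 'sparse' file and includetemp=False,
# vfs.tryread() returns b'', so the signature would be
#   b'da39a3ee5e6b4b0d3255bfef95601890afd80709 0'
# (sha1 of the empty string, followed by the literal b'0' placeholder).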
217
217
218
218
219 def writeconfig(repo, includes, excludes, profiles):
219 def writeconfig(repo, includes, excludes, profiles):
220 """Write the sparse config file given a sparse configuration."""
220 """Write the sparse config file given a sparse configuration."""
221 with repo.vfs(b'sparse', b'wb') as fh:
221 with repo.vfs(b'sparse', b'wb') as fh:
222 for p in sorted(profiles):
222 for p in sorted(profiles):
223 fh.write(b'%%include %s\n' % p)
223 fh.write(b'%%include %s\n' % p)
224
224
225 if includes:
225 if includes:
226 fh.write(b'[include]\n')
226 fh.write(b'[include]\n')
227 for i in sorted(includes):
227 for i in sorted(includes):
228 fh.write(i)
228 fh.write(i)
229 fh.write(b'\n')
229 fh.write(b'\n')
230
230
231 if excludes:
231 if excludes:
232 fh.write(b'[exclude]\n')
232 fh.write(b'[exclude]\n')
233 for e in sorted(excludes):
233 for e in sorted(excludes):
234 fh.write(e)
234 fh.write(e)
235 fh.write(b'\n')
235 fh.write(b'\n')
236
236
237 repo._sparsesignaturecache.clear()
237 repo._sparsesignaturecache.clear()
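# Round-trip sketch for the parseconfig() example above:
#   writeconfig(repo, {b'tools/'}, {b'tools/tests/'}, {b'base.sparse'})
# emits a 'sparse' file of the form
#
#   %include base.sparse
#   [include]
#   tools/
#   [exclude]
#   tools/tests/
#
# i.e. profiles first, then each section with one sorted entry per line.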
238
238
239
239
240 def readtemporaryincludes(repo):
240 def readtemporaryincludes(repo):
241 raw = repo.vfs.tryread(b'tempsparse')
241 raw = repo.vfs.tryread(b'tempsparse')
242 if not raw:
242 if not raw:
243 return set()
243 return set()
244
244
245 return set(raw.split(b'\n'))
245 return set(raw.split(b'\n'))
246
246
247
247
248 def writetemporaryincludes(repo, includes):
248 def writetemporaryincludes(repo, includes):
249 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
249 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
250 repo._sparsesignaturecache.clear()
250 repo._sparsesignaturecache.clear()
251
251
252
252
253 def addtemporaryincludes(repo, additional):
253 def addtemporaryincludes(repo, additional):
254 includes = readtemporaryincludes(repo)
254 includes = readtemporaryincludes(repo)
255 for i in additional:
255 for i in additional:
256 includes.add(i)
256 includes.add(i)
257 writetemporaryincludes(repo, includes)
257 writetemporaryincludes(repo, includes)
258
258
259
259
260 def prunetemporaryincludes(repo):
260 def prunetemporaryincludes(repo):
261 if not enabled or not repo.vfs.exists(b'tempsparse'):
261 if not enabled or not repo.vfs.exists(b'tempsparse'):
262 return
262 return
263
263
264 s = repo.status()
264 s = repo.status()
265 if s.modified or s.added or s.removed or s.deleted:
265 if s.modified or s.added or s.removed or s.deleted:
266 # Still have pending changes. Don't bother trying to prune.
266 # Still have pending changes. Don't bother trying to prune.
267 return
267 return
268
268
269 sparsematch = matcher(repo, includetemp=False)
269 sparsematch = matcher(repo, includetemp=False)
270 dirstate = repo.dirstate
270 dirstate = repo.dirstate
271 mresult = mergemod.mergeresult()
271 mresult = mergemod.mergeresult()
272 dropped = []
272 dropped = []
273 tempincludes = readtemporaryincludes(repo)
273 tempincludes = readtemporaryincludes(repo)
274 for file in tempincludes:
274 for file in tempincludes:
275 if file in dirstate and not sparsematch(file):
275 if file in dirstate and not sparsematch(file):
276 message = _(b'dropping temporarily included sparse files')
276 message = _(b'dropping temporarily included sparse files')
277 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
277 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
278 dropped.append(file)
278 dropped.append(file)
279
279
280 mergemod.applyupdates(
280 mergemod.applyupdates(
281 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
281 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
282 )
282 )
283
283
284 # Fix dirstate
284 # Fix dirstate
285 for file in dropped:
285 for file in dropped:
286 dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
286 dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
287
287
288 repo.vfs.unlink(b'tempsparse')
288 repo.vfs.unlink(b'tempsparse')
289 repo._sparsesignaturecache.clear()
289 repo._sparsesignaturecache.clear()
290 msg = _(
290 msg = _(
291 b'cleaned up %d temporarily added file(s) from the '
291 b'cleaned up %d temporarily added file(s) from the '
292 b'sparse checkout\n'
292 b'sparse checkout\n'
293 )
293 )
294 repo.ui.status(msg % len(tempincludes))
294 repo.ui.status(msg % len(tempincludes))
295
295
296
296
297 def forceincludematcher(matcher, includes):
297 def forceincludematcher(matcher, includes):
298 """Returns a matcher that returns true for any of the forced includes
298 """Returns a matcher that returns true for any of the forced includes
299 before testing against the actual matcher."""
299 before testing against the actual matcher."""
300 kindpats = [(b'path', include, b'') for include in includes]
300 kindpats = [(b'path', include, b'') for include in includes]
301 includematcher = matchmod.includematcher(b'', kindpats)
301 includematcher = matchmod.includematcher(b'', kindpats)
302 return matchmod.unionmatcher([includematcher, matcher])
302 return matchmod.unionmatcher([includematcher, matcher])
303
303
304
304
305 def matcher(repo, revs=None, includetemp=True):
305 def matcher(repo, revs=None, includetemp=True):
306 """Obtain a matcher for sparse working directories for the given revs.
306 """Obtain a matcher for sparse working directories for the given revs.
307
307
308 If multiple revisions are specified, the matcher is the union of all
308 If multiple revisions are specified, the matcher is the union of all
309 revs.
309 revs.
310
310
311 ``includetemp`` indicates whether to use the temporary sparse profile.
311 ``includetemp`` indicates whether to use the temporary sparse profile.
312 """
312 """
313 # If sparse isn't enabled, sparse matcher matches everything.
313 # If sparse isn't enabled, sparse matcher matches everything.
314 if not enabled:
314 if not enabled:
315 return matchmod.always()
315 return matchmod.always()
316
316
317 if not revs or revs == [None]:
317 if not revs or revs == [None]:
318 revs = [
318 revs = [
319 repo.changelog.rev(node)
319 repo.changelog.rev(node)
320 for node in repo.dirstate.parents()
320 for node in repo.dirstate.parents()
321 if node != repo.nullid
321 if node != repo.nullid
322 ]
322 ]
323
323
324 signature = configsignature(repo, includetemp=includetemp)
324 signature = configsignature(repo, includetemp=includetemp)
325
325
326 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
326 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
327
327
328 result = repo._sparsematchercache.get(key)
328 result = repo._sparsematchercache.get(key)
329 if result:
329 if result:
330 return result
330 return result
331
331
332 matchers = []
332 matchers = []
333 for rev in revs:
333 for rev in revs:
334 try:
334 try:
335 includes, excludes, profiles = patternsforrev(repo, rev)
335 includes, excludes, profiles = patternsforrev(repo, rev)
336
336
337 if includes or excludes:
337 if includes or excludes:
338 matcher = matchmod.match(
338 matcher = matchmod.match(
339 repo.root,
339 repo.root,
340 b'',
340 b'',
341 [],
341 [],
342 include=includes,
342 include=includes,
343 exclude=excludes,
343 exclude=excludes,
344 default=b'relpath',
344 default=b'relpath',
345 )
345 )
346 matchers.append(matcher)
346 matchers.append(matcher)
347 except IOError:
347 except IOError:
348 pass
348 pass
349
349
350 if not matchers:
350 if not matchers:
351 result = matchmod.always()
351 result = matchmod.always()
352 elif len(matchers) == 1:
352 elif len(matchers) == 1:
353 result = matchers[0]
353 result = matchers[0]
354 else:
354 else:
355 result = matchmod.unionmatcher(matchers)
355 result = matchmod.unionmatcher(matchers)
356
356
357 if includetemp:
357 if includetemp:
358 tempincludes = readtemporaryincludes(repo)
358 tempincludes = readtemporaryincludes(repo)
359 result = forceincludematcher(result, tempincludes)
359 result = forceincludematcher(result, tempincludes)
360
360
361 repo._sparsematchercache[key] = result
361 repo._sparsematchercache[key] = result
362
362
363 return result
363 return result
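# Cache-key sketch (rev numbers invented): with working-directory parents
# at revs 42 and 43, the matcher is cached under
#   b'<sparse signature> <temp signature> 42 43'
# reusing the configsignature() shape above, so editing the 'sparse' or
# 'tempsparse' file naturally invalidates the cached matcher.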
364
364
365
365
def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
    """Filter updates to only lay out files that match the sparse rules."""
    if not enabled:
        return

    oldrevs = [pctx.rev() for pctx in wctx.parents()]
    oldsparsematch = matcher(repo, oldrevs)

    if oldsparsematch.always():
        return

    files = set()
    prunedactions = {}

    if branchmerge:
        # If we're merging, use the wctx filter, since we're merging into
        # the wctx.
        sparsematch = matcher(repo, [wctx.p1().rev()])
    else:
        # If we're updating, use the target context's filter, since we're
        # moving to the target context.
        sparsematch = matcher(repo, [mctx.rev()])

    temporaryfiles = []
    for file, action in mresult.filemap():
        type, args, msg = action
        files.add(file)
        if sparsematch(file):
            prunedactions[file] = action
        elif type == mergestatemod.ACTION_MERGE:
            temporaryfiles.append(file)
            prunedactions[file] = action
        elif branchmerge:
            if type not in mergestatemod.NO_OP_ACTIONS:
                temporaryfiles.append(file)
                prunedactions[file] = action
        elif type == mergestatemod.ACTION_FORGET:
            prunedactions[file] = action
        elif file in wctx:
            prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)

        # in case of rename on one side, it is possible that f1 might not
        # be present in the sparse checkout; we should include it
        # TODO: should we do the same for f2?
        # this exists as a separate check because the file can be inside the
        # sparse cone, in which case folding it into the
        # `elif type == ACTION_MERGE` branch above would never trigger it
        if branchmerge and type == mergestatemod.ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if not sparsematch(f1):
                temporaryfiles.append(f1)

    if len(temporaryfiles) > 0:
        repo.ui.status(
            _(
                b'temporarily included %d file(s) in the sparse '
                b'checkout for merging\n'
            )
            % len(temporaryfiles)
        )
        addtemporaryincludes(repo, temporaryfiles)

        # Add the new files to the working copy so they can be merged, etc
        tmresult = mergemod.mergeresult()
        message = b'temporarily adding to sparse checkout'
        wctxmanifest = repo[None].manifest()
        for file in temporaryfiles:
            if file in wctxmanifest:
                fctx = repo[None][file]
                tmresult.addfile(
                    file,
                    mergestatemod.ACTION_GET,
                    (fctx.flags(), False),
                    message,
                )

        with repo.dirstate.parentchange():
            mergemod.applyupdates(
                repo,
                tmresult,
                repo[None],
                repo[b'.'],
                False,
                wantfiledata=False,
            )

            dirstate = repo.dirstate
            for file, flags, msg in tmresult.getactions(
                [mergestatemod.ACTION_GET]
            ):
                dirstate.update_file(file, p1_tracked=True, wc_tracked=True)

    profiles = activeconfig(repo)[2]
    changedprofiles = profiles & files
    # If an active profile changed during the update, refresh the checkout.
    # Don't do this during a branch merge, since all incoming changes should
    # have been handled by the temporary includes above.
    if changedprofiles and not branchmerge:
        mf = mctx.manifest()
        for file in mf:
            old = oldsparsematch(file)
            new = sparsematch(file)
            if not old and new:
                flags = mf.flags(file)
                prunedactions[file] = (
                    mergestatemod.ACTION_GET,
                    (flags, False),
                    b'',
                )
            elif old and not new:
                prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')

    mresult.setactions(prunedactions)

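The pruning loop above is effectively a small decision table keyed on whether the file is inside the sparse cone, the action type, and whether this is a branch merge. A self-contained sketch of that policy, with plain strings standing in for the mergestatemod constants (invented names, not the real module):

    # Hedged sketch of filterupdatesactions' pruning policy.
    NO_OP = {'keep', 'keep-new', 'keep-absent'}  # invented action names

    def prune(action, in_sparse, branchmerge, in_wctx):
        if in_sparse:
            return action        # inside the cone: keep the action as-is
        if action == 'merge':
            return action        # needed for merging: temporarily included
        if branchmerge:
            return action if action not in NO_OP else None
        if action == 'forget':
            return action        # forgetting is safe outside the cone
        if in_wctx:
            return 'remove'      # the file leaves the sparse checkout
        return None

    assert prune('get', in_sparse=True, branchmerge=False, in_wctx=False) == 'get'
    assert prune('get', in_sparse=False, branchmerge=False, in_wctx=True) == 'remove'
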
def refreshwdir(repo, origstatus, origsparsematch, force=False):
    """Refreshes working directory by taking sparse config into account.

    The old status and sparse matcher are compared against the current sparse
    matcher.

    Will abort if a file with pending changes is being excluded or included
    unless ``force`` is True.
    """
    # Verify there are no pending changes
    pending = set()
    pending.update(origstatus.modified)
    pending.update(origstatus.added)
    pending.update(origstatus.removed)
    sparsematch = matcher(repo)
    abort = False

    for f in pending:
        if not sparsematch(f):
            repo.ui.warn(_(b"pending changes to '%s'\n") % f)
            abort = not force

    if abort:
        raise error.Abort(
            _(b'could not update sparseness due to pending changes')
        )

    # Calculate merge result
    dirstate = repo.dirstate
    ctx = repo[b'.']
    added = []
    lookup = []
    dropped = []
    mf = ctx.manifest()
    files = set(mf)
    mresult = mergemod.mergeresult()

    for file in files:
        old = origsparsematch(file)
        new = sparsematch(file)
        # Add files that are newly included, or that don't exist in
        # the dirstate yet.
        if (new and not old) or (old and new and file not in dirstate):
            fl = mf.flags(file)
            if repo.wvfs.exists(file):
                mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
                lookup.append(file)
            else:
                mresult.addfile(
                    file, mergestatemod.ACTION_GET, (fl, False), b''
                )
                added.append(file)
        # Drop files that are newly excluded, or that still exist in
        # the dirstate.
        elif (old and not new) or (not old and not new and file in dirstate):
            dropped.append(file)
            if file not in pending:
                mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')

    # Verify there are no pending changes in newly included files
    abort = False
    for file in lookup:
        repo.ui.warn(_(b"pending changes to '%s'\n") % file)
        abort = not force
    if abort:
        raise error.Abort(
            _(
                b'cannot change sparseness due to pending '
                b'changes (delete the files or use '
                b'--force to bring them back dirty)'
            )
        )

    # Check for files that were only in the dirstate.
    for file, state in pycompat.iteritems(dirstate):
        if file not in files:
            old = origsparsematch(file)
            new = sparsematch(file)
            if old and not new:
                dropped.append(file)

    mergemod.applyupdates(
        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
    )

    # Fix dirstate
    for file in added:
        dirstate.update_file(file, p1_tracked=True, wc_tracked=True)

    for file in dropped:
        dirstate.update_file(file, p1_tracked=False, wc_tracked=False)

    for file in lookup:
        # File exists on disk, and we're bringing it back in an unknown state.
        dirstate.update_file(
            file, p1_tracked=True, wc_tracked=True, possibly_dirty=True
        )

    return added, dropped, lookup

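refreshwdir's add/drop classification reduces to a truth table over the old matcher, the new matcher, and dirstate membership. A minimal standalone restatement (an assumed simplification; the real code also distinguishes files already on disk, which go to `lookup`):

    def classify(old, new, in_dirstate):
        if (new and not old) or (old and new and not in_dirstate):
            return 'add'   # newly included, or included but untracked
        if (old and not new) or (not old and not new and in_dirstate):
            return 'drop'  # newly excluded, or excluded but still tracked
        return 'keep'

    assert classify(old=False, new=True, in_dirstate=False) == 'add'
    assert classify(old=True, new=False, in_dirstate=True) == 'drop'
    assert classify(old=True, new=True, in_dirstate=True) == 'keep'
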
def aftercommit(repo, node):
    """Perform actions after a working directory commit."""
    # This function is called unconditionally, even if sparse isn't
    # enabled.
    ctx = repo[node]

    profiles = patternsforrev(repo, ctx.rev())[2]

    # profiles will only have data if sparse is enabled.
    if profiles & set(ctx.files()):
        origstatus = repo.status()
        origsparsematch = matcher(repo)
        refreshwdir(repo, origstatus, origsparsematch, force=True)

    prunetemporaryincludes(repo)

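To see why aftercommit matters: committing a change to an enabled profile immediately changes what the matcher accepts, so the checkout must be refreshed on the spot. A hypothetical session (file and directory names invented; `--enable-profile` is the sparse extension's flag for activating a profile):

    $ hg debugsparse --enable-profile webteam.sparse
    $ echo 'path:newdir' >> webteam.sparse
    $ hg commit -m 'widen the webteam profile'

Because `webteam.sparse` appears in both the active profiles and `ctx.files()`, the commit itself triggers `refreshwdir(..., force=True)` and `newdir` materializes without a separate refresh step.
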
def _updateconfigandrefreshwdir(
    repo, includes, excludes, profiles, force=False, removing=False
):
    """Update the sparse config and working directory state."""
    raw = repo.vfs.tryread(b'sparse')
    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')

    oldstatus = repo.status()
    oldmatch = matcher(repo)
    oldrequires = set(repo.requirements)

    # TODO remove this try..except once the matcher integrates better
    # with dirstate. We currently have to write the updated config
    # because that will invalidate the matcher cache and force a
    # re-read. We ideally want to update the cached matcher on the
    # repo instance then flush the new config to disk once wdir is
    # updated. But this requires massive rework to matcher() and its
    # consumers.

    if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
        repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
        scmutil.writereporequirements(repo)
    elif requirements.SPARSE_REQUIREMENT not in oldrequires:
        repo.requirements.add(requirements.SPARSE_REQUIREMENT)
        scmutil.writereporequirements(repo)

    try:
        writeconfig(repo, includes, excludes, profiles)
        return refreshwdir(repo, oldstatus, oldmatch, force=force)
    except Exception:
        if repo.requirements != oldrequires:
            repo.requirements.clear()
            repo.requirements |= oldrequires
            scmutil.writereporequirements(repo)
        writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
        raise

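The try/except above is a snapshot-and-restore pattern: if the refresh fails, both the requirements and the on-disk sparse config are rolled back, so the repository never advertises `SPARSE_REQUIREMENT` without a matching config. The same pattern in miniature (standalone, illustrative names):

    def update_with_rollback(state, apply_update):
        snapshot = set(state)
        try:
            apply_update(state)
        except Exception:
            state.clear()
            state.update(snapshot)  # restore the pre-update snapshot
            raise

    reqs = {'store', 'fncache'}
    try:
        # the update adds a requirement, then blows up mid-way
        update_with_rollback(reqs, lambda s: (s.add('exp-sparse'), 1 / 0))
    except ZeroDivisionError:
        pass
    assert reqs == {'store', 'fncache'}  # the failed update left no trace
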
def clearrules(repo, force=False):
    """Clears include/exclude rules from the sparse config.

    The remaining sparse config only has profiles, if defined. The working
    directory is refreshed, as needed.
    """
    with repo.wlock(), repo.dirstate.parentchange():
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')

        if not includes and not excludes:
            return

        _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)

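clearrules backs the rule-clearing path of `hg debugsparse`; a hypothetical session showing the profile-preserving behavior the docstring promises (flag names per the sparse extension's command-line surface):

    $ hg debugsparse --enable-profile base.sparse
    $ hg debugsparse --include 'path:extras'
    $ hg debugsparse --clear-rules

After the last command the include rule is gone but `base.sparse` stays enabled, and the working directory is refreshed accordingly.
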
def importfromfiles(repo, opts, paths, force=False):
    """Import sparse config rules from files.

    The updated sparse config is written out and the working directory
    is refreshed, as needed.
    """
    with repo.wlock(), repo.dirstate.parentchange():
        # read current configuration
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
        aincludes, aexcludes, aprofiles = activeconfig(repo)

        # Import rules on top; only take in rules that are not yet
        # part of the active rules.
        changed = False
        for p in paths:
            with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
                raw = fh.read()

            iincludes, iexcludes, iprofiles = parseconfig(
                repo.ui, raw, b'sparse'
            )
            oldsize = len(includes) + len(excludes) + len(profiles)
            includes.update(iincludes - aincludes)
            excludes.update(iexcludes - aexcludes)
            profiles.update(iprofiles - aprofiles)
            if len(includes) + len(excludes) + len(profiles) > oldsize:
                changed = True

        profilecount = includecount = excludecount = 0
        fcounts = (0, 0, 0)

        if changed:
            profilecount = len(profiles - aprofiles)
            includecount = len(includes - aincludes)
            excludecount = len(excludes - aexcludes)

            fcounts = map(
                len,
                _updateconfigandrefreshwdir(
                    repo, includes, excludes, profiles, force=force
                ),
            )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

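The import loop's dedup against the active configuration is plain set arithmetic, with `changed` detected by a size comparison. A standalone sketch with made-up rule values:

    includes = {b'path:src'}                  # rules already in .hg/sparse
    active = {b'path:src', b'path:docs'}      # rules currently in effect
    imported = {b'path:docs', b'path:tools'}  # rules read from the file

    oldsize = len(includes)
    includes.update(imported - active)        # only b'path:tools' is new
    changed = len(includes) > oldsize

    assert includes == {b'path:src', b'path:tools'}
    assert changed
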
def updateconfig(
    repo,
    pats,
    opts,
    include=False,
    exclude=False,
    reset=False,
    delete=False,
    enableprofile=False,
    disableprofile=False,
    force=False,
    usereporootpaths=False,
):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
-    with repo.wlock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
        raw = repo.vfs.tryread(b'sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(
            repo.ui, raw, b'sparse'
        )

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_(b'paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
                        root, cwd, pat
                    )
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = len(newprofiles - oldprofiles) - len(
            oldprofiles - newprofiles
        )
        includecount = len(newinclude - oldinclude) - len(
            oldinclude - newinclude
        )
        excludecount = len(newexclude - oldexclude) - len(
            oldexclude - newexclude
        )

        fcounts = map(
            len,
            _updateconfigandrefreshwdir(
                repo,
                newinclude,
                newexclude,
                newprofiles,
                force=force,
                removing=reset,
            ),
        )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

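The `repo.lock()` added to the `with` statement above is the substance of this changeset: `_updateconfigandrefreshwdir` ends up calling `scmutil.writereporequirements()`, which rewrites the store-level `requires` file, and store writes belong under the store lock, not only the working-copy `wlock`. A standalone sketch of the ordering convention, with threading locks standing in for the repository locks (an assumed simplification, not Mercurial's real locking code):

    import threading

    # Illustrative stand-ins: repo.wlock() guards the working copy,
    # repo.lock() guards the store; Mercurial takes the wlock first.
    wlock = threading.RLock()
    store_lock = threading.RLock()

    def update_sparse_config(write_requires, refresh_wdir):
        with wlock, store_lock:   # same order as the fixed updateconfig()
            write_requires()      # store write: needs the store lock
            refresh_wdir()        # working-copy write: needs the wlock

    update_sparse_config(lambda: None, lambda: None)
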
def printchanges(
    ui,
    opts,
    profilecount=0,
    includecount=0,
    excludecount=0,
    added=0,
    dropped=0,
    conflicting=0,
):
    """Print output summarizing sparse config changes."""
    with ui.formatter(b'sparse', opts) as fm:
        fm.startitem()
        fm.condwrite(
            ui.verbose,
            b'profiles_added',
            _(b'Profiles changed: %d\n'),
            profilecount,
        )
        fm.condwrite(
            ui.verbose,
            b'include_rules_added',
            _(b'Include rules changed: %d\n'),
            includecount,
        )
        fm.condwrite(
            ui.verbose,
            b'exclude_rules_added',
            _(b'Exclude rules changed: %d\n'),
            excludecount,
        )

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            fm.condwrite(
                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
            )
            fm.condwrite(
                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
            )
            fm.condwrite(
                ui.verbose,
                b'files_conflicting',
                _(b'Files conflicting: %d\n'),
                conflicting,
            )

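With `--verbose` under the default plain formatter, only the three rule-count lines are printed (the file counts are suppressed there, as the comment above explains). Illustrative output for a single new include rule (numbers invented):

    Profiles changed: 0
    Include rules changed: 1
    Exclude rules changed: 0
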
@@ -1,25 +1,23 b''
Same with share-safe

  $ echo "[format]" >> $HGRCPATH
  $ echo "use-share-safe = True" >> $HGRCPATH

  $ cd $TESTTMP

  $ hg init myrepo
  $ cd myrepo
  $ cat > .hg/hgrc <<EOF
  > [extensions]
  > sparse=
  > EOF

  $ echo a > show
  $ echo x > hide
  $ hg ci -Aqm 'initial'

-Verify basic --include
+Regression test: checks that this command correctly locks the store
+before updating the store [requirements] config.

  $ hg up -q 0
  $ hg debugsparse --include 'hide'
-  devel-warn: write with no lock: "requires" at: *mercurial/scmutil.py:1558 (writerequires) (glob)
-
-TODO: bug in sparse when used together with safe-share^
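The removed `devel-warn` line is the regression itself: before this changeset, `updateconfig()` reached `writerequires()` holding only the wlock, and the developer warning flagged the unlocked store write. With `repo.lock()` now taken, the warning (and the TODO it prompted) disappear, so the test expects the command to succeed silently.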