##// END OF EJS Templates
windows: use abspath in mercurial/hg.py...
marmoute -
r48426:1fdf315e default
parent child Browse files
Show More
@@ -1,1590 +1,1590 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# Convenience alias so callers can release locks without importing lock.
release = lock.release

# Feature names recorded in .hg/shared to describe what a share shares.
sharedbookmarks = b'bookmarks'
66
66
67
67
def _local(path):
    """Return the repository module implementing local access for *path*.

    A regular file is assumed to be a bundle and maps to ``bundlerepo``;
    anything else (a directory, a missing path) maps to ``localrepo``.
    """
    expanded = util.expandpath(urlutil.urllocalpath(path))

    # os.stat() is used directly instead of os.path.isfile() because the
    # latter started returning False on invalid-path exceptions in
    # Python 3.8, and invalid paths must be reported explicitly here.
    try:
        is_regular = stat.S_ISREG(os.stat(expanded).st_mode)
    except (TypeError, ValueError) as exc:
        # Python 2 raises TypeError, Python 3 ValueError for bad paths.
        raise error.Abort(
            _(b'invalid path %s: %s')
            % (expanded, stringutil.forcebytestr(exc))
        )
    except OSError:
        is_regular = False

    if is_regular:
        return bundlerepo
    return localrepo
87
87
88
88
def addbranchrevs(lrepo, other, branches, revs):
    """Combine explicit revisions with branch names resolved via *other*.

    ``branches`` is a ``(hashbranch, branchnames)`` pair.  Returns a
    ``(revs, checkout)`` tuple where ``checkout`` is the revision the
    caller should update to (or None).
    """
    # A courtesy to callers that pass a localrepo for ``other``.
    peer = other.peer()
    hashbranch, branches = branches

    if not hashbranch and not branches:
        # No branch information at all: hand the revisions straight back.
        checkout = revs[0] if revs else None
        return revs or None, checkout

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' names the current dirstate branch of the local repo.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # Not a known branch name; treat it as a raw revision.
        revs.append(hashbranch)
    return revs, revs[0]
131
131
132
132
def parseurl(path, branches=None):
    """Deprecated shim: parse url#branch into (url, (branch, branches)).

    The implementation moved to mercurial.utils.urlutil; this wrapper
    only emits a deprecation warning and delegates.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
138
138
139
139
# URL scheme -> module (or factory) implementing that scheme.  Missing
# schemes fall back to b'file' handling in _peerlookup().  Insertion
# order is preserved deliberately in case callers iterate this mapping.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
149
150
150
def _peerlookup(path):
    """Return the scheme handler (module or repo object) for *path*."""
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # callable(handler) cannot be tested here because 'handler' can
        # be an unloaded module that implements __call__.
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
163
163
164
164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # The scheme handler has no islocal(): not a local scheme.
        return False
173
173
174
174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
182
182
183
183
# Functions invoked as f(ui, peer) to initialize newly created wire peers.
wirepeersetupfuncs = []
186
186
187
187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # The instance may carry its own ui; prefer it for the setup hooks.
    ui = getattr(obj, "ui", ui)

    for fn in presetupfuncs or []:
        fn(ui, obj)

    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)

    if not obj.local():
        # Wire peers get additional initialization from registered hooks.
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
    return obj
214
214
215
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    # Hand callers the default 'visible' view (hides obsolete changesets).
    return repo.filtered(b'visible')
239
239
240
240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247
247
248
248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    url_path = urlutil.url(source).path
    if not url_path:
        # No path component (bare host, empty source): no sensible name.
        return b''
    return os.path.basename(os.path.normpath(url_path))
269
269
270
270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # Not a share: the store lives in the repo itself.
        return None

    # Return a cached source repo when one was resolved earlier.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # sharedpath always ends in '.hg'; strip it to get the repo root.
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # memoize for subsequent calls
    return srcrepo
288
288
289
289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if dest:
        dest = urlutil.get_clone_path(ui, dest)[1]
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        # ``source`` is a path/URL: open it and resolve branch revisions.
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # ``source`` is already a repo/peer object.
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Re-open the destination so it picks up the shared configuration.
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
336
336
337
337
def _prependsourcehgrc(repo):
    """Copy the share source's config ahead of this repo's .hg/hgrc.

    Used on unshare, and only when the share was performed with the
    share-safe method where the source's config is shared with shares.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # Nothing to carry over from the source.
        return

    existing = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
358
358
359
359
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """
    with repo.lock():
        # Hold the lock so a concurrent commit cannot leave revlog data
        # in the copied store that no changeset points to (which would
        # make verify fail).
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # Share-safe repos share .hg/hgrc with the source; copy
                # it while unsharing, otherwise hooks and other checks
                # configured there would silently stop applying.
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo
    # instance, so instantiate a fresh object rather than trying to keep
    # the existing one usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were
    # previously removed from .hgsub
    ctx = newrepo[b'.']
    for sub in sorted(ctx.substate):
        ctx.sub(sub).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
406
406
407
407
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # Narrow clones also need the narrowspec in the working copy.
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
424
424
425
425
426 def _postshareupdate(repo, update, checkout=None):
426 def _postshareupdate(repo, update, checkout=None):
427 """Maybe perform a working directory update after a shared repo is created.
427 """Maybe perform a working directory update after a shared repo is created.
428
428
429 ``update`` can be a boolean or a revision to update to.
429 ``update`` can be a boolean or a revision to update to.
430 """
430 """
431 if not update:
431 if not update:
432 return
432 return
433
433
434 repo.ui.status(_(b"updating working directory\n"))
434 repo.ui.status(_(b"updating working directory\n"))
435 if update is not True:
435 if update is not True:
436 checkout = update
436 checkout = update
437 for test in (checkout, b'default', b'tip'):
437 for test in (checkout, b'default', b'tip'):
438 if test is None:
438 if test is None:
439 continue
439 continue
440 try:
440 try:
441 uprev = repo.lookup(test)
441 uprev = repo.lookup(test)
442 break
442 break
443 except error.RepoLookupError:
443 except error.RepoLookupError:
444 continue
444 continue
445 _update(repo, uprev)
445 _update(repo, uprev)
446
446
447
447
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phase data of a publishing repo must not be copied
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(f):
                    continue
                if f.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0);
                    # lock to avoid premature writing to the target
                    lockfile = os.path.join(dstbase, b"lock")
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(
                    srcvfs.join(f), dstvfs.join(f), hardlink, progress
                )
                num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
486
486
487
487
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        # Use util.abspath rather than os.path.abspath so the stored path
        # is normalized consistently on Windows as well (bytes-safe
        # wrapper; see changeset "windows: use abspath in mercurial/hg.py").
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
594
594
595
595
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # nothing to copy; silently skip missing caches
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
605
605
606
606
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            # an existing destination is only acceptable if it is an
            # empty directory
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                # name the pooled store after a hash of the source URL/path
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                # delegate the whole clone to the share-aware helper
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            # util.abspath (not os.path.abspath) per the Windows fix in
            # this changeset
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                # fall back to a pull-based clone rather than blocking
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    # the destination appeared concurrently; do not delete it
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        # None lets the server decide whether to stream
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # clone succeeded; disarm the cleanup in the finally block
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    # `update` names a revision; resolve it via the source peer
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # NOTE(review): the `%` is applied inside `_()`
                            # here (unlike the branch below) — looks like a
                            # misplaced paren that defeats translation lookup;
                            # confirm before changing
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # clone failed partway; remove what we created
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1058
1058
1059
1059
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of merge/update statistics on repo.ui."""
    if quietempty and stats.isempty():
        return
    summary = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(summary % counts)
1075
1075
1076
1076
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated since 5.7: thin wrapper kept for backward compatibility;
    # callers should move to merge.update() / merge.clean_update().
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        # overwrite maps to a forced (clobbering) update
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1095
1095
1096
1096
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # truthy return signals that conflicts remain
    return unresolved > 0
1104
1104
1105
1105
1106 # naming conflict in clone()
1106 # naming conflict in clone()
1107 _update = update
1107 _update = update
1108
1108
1109
1109
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave conflicts behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # mirror update()'s contract: False means "no unresolved files"
    return False
1117
1117
1118
1118
# naming conflict in updatetotally()
# (updatetotally() has a `clean` argument that shadows the function above)
_clean = clean
1121
1121
# The set of `commands.update.check` values accepted by updatetotally();
# anything else falls back to UPDATECHECK_LINEAR.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1128
1128
1129
1129
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly a bookmark
            # to move) from the repository state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1208
1208
1209
1209
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1231
1231
1232
1232
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the first parent.

    Picks the local side of the recorded merge state when one exists,
    otherwise falls back to the current working-directory parent.
    """
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update to an existing revision cannot leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1246
1246
1247
1247
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subpath replaces the source entirely
            source = bytes(subpath)
        else:
            # a relative subpath is joined onto the source's path component
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may hand back a bundle-backed repo plus a new
        # cleanup callable that also removes the temporary bundle
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1304
1304
1305
1305
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from `source`.

    Returns 0 when incoming changes were found, 1 otherwise (including
    the subrepo recursion result when --subrepos is given).
    """

    def subreporecurse():
        # recurse into subrepos; the best (lowest) exit code wins
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show up to --limit changesets, honoring --newest-first and
        # --no-merges filtering
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1335
1335
1336
1336
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the union of nodes missing from all push destinations.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the list of missing nodes sorted by local revision number and
    ``others`` is the list of still-open peers (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # an absolute subpath replaces the destination entirely
                dest = bytes(subpath)
            else:
                # a relative subpath is joined onto the destination's path
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1375
1375
1376
1376
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when --subrepos is set.

    Returns the minimum (best) exit code across subrepos, or 1 when
    nothing was recursed into.
    """
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1385
1385
1386
1386
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first ordering,
    --no-merges filtering, and the --limit cap.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter; stream the (possibly reversed) list as-is
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n
1408
1408
1409
1409
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise. Always
    closes the peers opened by _outgoing().
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for oth in others:
            oth.close()
1441
1441
1442
1442
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1475
1475
1476
1476
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1505
1505
1506
1506
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1516
1516
1517
1517
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Snapshot (mtime, size) of the files-of-interest.

        Returns ``(state, maxmtime)`` where ``state`` is a tuple of
        (mtime, size) pairs, one per entry in ``foi``, and ``maxmtime``
        is the newest mtime seen (-1 if nothing could be stated).
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # fall back to the containing directory when the file
                # does not exist yet
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now