##// END OF EJS Templates
clone: use `get_clone_path`...
marmoute -
r47713:338ab1d8 default
parent child Browse files
Show More
@@ -1,1597 +1,1597 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 graphmod,
35 graphmod,
36 httppeer,
36 httppeer,
37 localrepo,
37 localrepo,
38 lock,
38 lock,
39 logcmdutil,
39 logcmdutil,
40 logexchange,
40 logexchange,
41 merge as mergemod,
41 merge as mergemod,
42 mergestate as mergestatemod,
42 mergestate as mergestatemod,
43 narrowspec,
43 narrowspec,
44 phases,
44 phases,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
# convenience alias for releasing one or more locks
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repository module to use for a local *path*.

    A path that names a regular file is treated as a bundle
    (``bundlerepo``); anything else is a normal local repository
    (``localrepo``).
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return isfile and bundlerepo or localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve a (hashbranch, branches) pair against a peer's branchmap.

    Returns a ``(revs, checkout)`` pair: the (possibly extended) list of
    revisions and the revision to check out (``None`` when nothing was
    requested).  Raises ``error.RepoLookupError`` for unknown branches
    and ``error.Abort`` when the peer cannot do branch lookups.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve b'.' to the local dirstate branch; extend revs with the
        # branch heads (newest first) when the branch is known
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    # deprecated shim kept for backward compatibility; forwards to urlutil
    msg = b'parseurl(...) moved to mercurial.utils.urlutil'
    util.nouideprecwarn(msg, b'6.0', stacklevel=2)
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# map of URL scheme -> module (or callable) providing peer/repo instances
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the scheme handler (module or object) for *path*.

    Unknown schemes fall back to the b'file' handler.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
164
164
165
165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # the scheme handler provides no islocal(); treat as remote
            return False
    return repo.local()
174
174
175
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own ui; prefer it for the setup hooks
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization hooks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        # callers of repository() require a local repo, not a remote peer
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached on the repo object by a previous call
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: parse it and resolve branch revisions
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
338
338
339
339
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first, then the repo's own config so local settings win
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
360
360
361
361
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
408
408
409
409
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
426
426
427
427
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default/tip
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
448
448
449
449
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # don't copy phase information for publishing repos
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
488
488
489
489
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
596
596
597
597
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy cache file *fname* from *srcrepo* into *dstcachedir*, if present.

    Missing source files are silently skipped; the destination cache
    directory is created on demand.
    """
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
607
607
608
608
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # Resolve the source into (original URL, expanded URL, branch info) and
    # open a peer to it. A bytes source is a path/URL; anything else is
    # assumed to already be a repository object.
    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        # Pooled storage: when requested and the destination is local, clone
        # into a shared store and make `dest` a share of it.
        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # Decide whether we can hardlink-copy the store instead of pulling:
        # only for a full, non-narrow clone of a local, secret-free source.
        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # The clone succeeded; don't remove the destination on the way out.
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Translate the template, then interpolate, so the
                            # message can actually match a catalog entry (the
                            # previous code formatted before calling _()).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1067
1067
1068
1068
def _showstats(repo, stats, quietempty=False):
    """Print the updated/merged/removed/unresolved summary for *stats*.

    With quietempty=True, nothing is printed when the stats report no
    activity at all.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1084
1084
1085
1085
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to *node*.

    When *overwrite* is set, working-copy changes are clobbered;
    otherwise they are merged.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    opts = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **opts)
1104
1104
1105
1105
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to *node*.

    Returns True when unresolved file merges remain, False otherwise.
    """
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1113
1113
1114
1114
# naming conflict in clone(): its `update` parameter shadows the module-level
# update() function, so clone() calls it through this private alias
_update = update
1117
1117
1118
1118
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to *node*, clobbering changes.

    Always returns False: a clean (overwriting) update cannot leave
    unresolved merges behind.
    """
    result = mergemod.clean_update(repo[node])
    # an overwriting update can never produce merge conflicts
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    return False
1126
1126
1127
1127
# naming conflict in updatetotally(): its `clean` parameter shadows the
# module-level clean() function, so it calls it through this private alias
_clean = clean
1130
1130
# The set of values accepted for the `updatecheck` argument of
# updatetotally() and for the `commands.update.check` config option;
# anything outside this set is replaced with UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1137
1137
1138
1138
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # not configured, or an invalid value was configured: fall back
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument must be one of the known constants
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move along)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now; past this point no check is needed anymore
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination names an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1217
1217
1218
1218
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        # tell the user how to continue or back out
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1240
1240
1241
1241
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the first parent."""
    state = mergestatemod.mergestate.read(repo)
    if state.active():
        # there were conflicts
        node = state.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1255
1255
1256
1256
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        # BUG FIX: message must be a bytes literal; this codebase passes
        # bytes to _() and formats/aborts with bytes messages.
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # narrow the comparison to a subrepository path
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` with a bundlerepo and hands
        # back the cleanup callable that must run instead of other.close
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1313
1313
1314
1314
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets not found locally; return 0 if any were found."""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(node) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1342
1342
1343
1343
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Find changesets missing from the given destinations.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes ordered by local revision number and ``others`` is
    the list of opened peers (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # narrow the comparison to a subrepository path
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # only close the peer we failed on; earlier ones are returned
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1382
1382
1383
1383
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on subrepositories when --subrepos was requested.

    Returns 0 if any subrepository had outgoing changes, 1 otherwise.
    """
    status = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            status = min(status, sub.outgoing(ui, dests, opts))
    return status
1392
1392
1393
1393
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream everything through unchanged
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n
1415
1415
1416
1416
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes exist (here or in a subrepo), else 1.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing handed us open peers; we are responsible for closing them
        for oth in others:
            oth.close()
1448
1448
1449
1449
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # verify each subrepo; keep any prior failure sticky
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1482
1482
1483
1483
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for opt in (b'ssh', b'remotecmd'):
        val = opts.get(opt) or src.config(b'ui', opt)
        if val:
            dst.setconfig(b"ui", opt, val, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1512
1512
1513
1513
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the directory, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1523
1523
1524
1524
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` snapshot of the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per entry in ``foi``;
        ``maxmtime`` is the newest mtime seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet; fall back to the directory itself
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now