##// END OF EJS Templates
share: use `get_clone_path`...
marmoute -
r47715:394cfc42 default
parent child Browse files
Show More
@@ -1,1597 +1,1596 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 graphmod,
35 graphmod,
36 httppeer,
36 httppeer,
37 localrepo,
37 localrepo,
38 lock,
38 lock,
39 logcmdutil,
39 logcmdutil,
40 logexchange,
40 logexchange,
41 merge as mergemod,
41 merge as mergemod,
42 mergestate as mergestatemod,
42 mergestate as mergestatemod,
43 narrowspec,
43 narrowspec,
44 phases,
44 phases,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
# Convenience alias used throughout this module to release locks.
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repo module for a local path.

    Returns ``bundlerepo`` when *path* points at a regular file (a bundle),
    ``localrepo`` otherwise.  Raises ``error.Abort`` on invalid paths.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # path doesn't exist (or isn't statable): treat as a directory repo
        isfile = False

    return isfile and bundlerepo or localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against peer ``other`` and extend ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair as returned by url
    parsing.  Returns ``(revs, checkout)`` where ``checkout`` is the
    revision a subsequent working-directory update should target (may be
    ``None``).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve; just report what we were given
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve one branch name; returns True if it was found in the
        # remote branchmap (in which case its heads were added to revs)
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it's a raw revision/hash
            revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))

    Deprecated: use mercurial.utils.urlutil.parseurl() instead.
    '''
    msg = b'parseurl(...) moved to mercurial.utils.urlutil'
    util.nouideprecwarn(msg, b'6.0', stacklevel=2)
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# Map of URL scheme -> repo/peer module (or factory) used to open it.
# Schemes not listed here fall back to b'file' (see _peerlookup).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the repo/peer module appropriate for ``path``'s URL scheme."""
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        # schemes may map to a factory taking the path (e.g. _local)
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
164
164
165
165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # the scheme's handler has no islocal(): treat as remote
            return False
    return repo.local()
174
174
175
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs ``presetupfuncs`` and every extension's ``reposetup`` hook on the
    newly created object, and wire-peer setup functions for non-local peers.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-level) ui; prefer it if so
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Unlike :func:`_peerorrepo`, this insists on a *local* repository and
    returns it filtered to the b'visible' view.
    """
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # build a ui configured for remote interaction from uiorrepo + opts
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last path component, with any trailing slash normalized away
    return os.path.basename(os.path.normpath(path))
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.  The resolved source
    repository is cached on ``repo.srcrepo``.
    """
    if repo.sharedpath == repo.path:
        # not a share: sharedpath points at our own store
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local path/URL (bytes) or a repo/peer object.
    Returns the newly created shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        # get_clone_path returns (origsource, source, branches); the
        # expanded local path is the second element
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
338
337
339
338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first so the local config keeps precedence (last wins)
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
360
359
361
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    #       removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
408
407
409
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # propagate the narrowspec into the new share's working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
426
425
427
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default/tip
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
448
447
449
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos don't need phase information copied
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
488
487
489
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
596
595
597
596
598 # Recomputing caches is often slow on big repos, so copy them.
597 # Recomputing caches is often slow on big repos, so copy them.
599 def _copycache(srcrepo, dstcachedir, fname):
598 def _copycache(srcrepo, dstcachedir, fname):
600 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
601 srcfname = srcrepo.cachevfs.join(fname)
600 srcfname = srcrepo.cachevfs.join(fname)
602 dstfname = os.path.join(dstcachedir, fname)
601 dstfname = os.path.join(dstcachedir, fname)
603 if os.path.exists(srcfname):
602 if os.path.exists(srcfname):
604 if not os.path.exists(dstcachedir):
603 if not os.path.exists(dstcachedir):
605 os.mkdir(dstcachedir)
604 os.mkdir(dstcachedir)
606 util.copyfile(srcfname, dstfname)
605 util.copyfile(srcfname, dstfname)
607
606
608
607
609 def clone(
608 def clone(
610 ui,
609 ui,
611 peeropts,
610 peeropts,
612 source,
611 source,
613 dest=None,
612 dest=None,
614 pull=False,
613 pull=False,
615 revs=None,
614 revs=None,
616 update=True,
615 update=True,
617 stream=False,
616 stream=False,
618 branch=None,
617 branch=None,
619 shareopts=None,
618 shareopts=None,
620 storeincludepats=None,
619 storeincludepats=None,
621 storeexcludepats=None,
620 storeexcludepats=None,
622 depth=None,
621 depth=None,
623 ):
622 ):
624 """Make a copy of an existing repository.
623 """Make a copy of an existing repository.
625
624
626 Create a copy of an existing repository in a new directory. The
625 Create a copy of an existing repository in a new directory. The
627 source and destination are URLs, as passed to the repository
626 source and destination are URLs, as passed to the repository
628 function. Returns a pair of repository peers, the source and
627 function. Returns a pair of repository peers, the source and
629 newly created destination.
628 newly created destination.
630
629
631 The location of the source is added to the new repository's
630 The location of the source is added to the new repository's
632 .hg/hgrc file, as the default to be used for future pulls and
631 .hg/hgrc file, as the default to be used for future pulls and
633 pushes.
632 pushes.
634
633
635 If an exception is raised, the partly cloned/updated destination
634 If an exception is raised, the partly cloned/updated destination
636 repository will be deleted.
635 repository will be deleted.
637
636
638 Arguments:
637 Arguments:
639
638
640 source: repository object or URL
639 source: repository object or URL
641
640
642 dest: URL of destination repository to create (defaults to base
641 dest: URL of destination repository to create (defaults to base
643 name of source repository)
642 name of source repository)
644
643
645 pull: always pull from source repository, even in local case or if the
644 pull: always pull from source repository, even in local case or if the
646 server prefers streaming
645 server prefers streaming
647
646
648 stream: stream raw data uncompressed from repository (fast over
647 stream: stream raw data uncompressed from repository (fast over
649 LAN, slow over WAN)
648 LAN, slow over WAN)
650
649
651 revs: revision to clone up to (implies pull=True)
650 revs: revision to clone up to (implies pull=True)
652
651
653 update: update working directory after clone completes, if
652 update: update working directory after clone completes, if
654 destination is local repository (True means update to default rev,
653 destination is local repository (True means update to default rev,
655 anything else is treated as a revision)
654 anything else is treated as a revision)
656
655
657 branch: branches to clone
656 branch: branches to clone
658
657
659 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 shareopts: dict of options to control auto sharing behavior. The "pool" key
660 activates auto sharing mode and defines the directory for stores. The
659 activates auto sharing mode and defines the directory for stores. The
661 "mode" key determines how to construct the directory name of the shared
660 "mode" key determines how to construct the directory name of the shared
662 repository. "identity" means the name is derived from the node of the first
661 repository. "identity" means the name is derived from the node of the first
663 changeset in the repository. "remote" means the name is derived from the
662 changeset in the repository. "remote" means the name is derived from the
664 remote's path/URL. Defaults to "identity."
663 remote's path/URL. Defaults to "identity."
665
664
666 storeincludepats and storeexcludepats: sets of file patterns to include and
665 storeincludepats and storeexcludepats: sets of file patterns to include and
667 exclude in the repository copy, respectively. If not defined, all files
666 exclude in the repository copy, respectively. If not defined, all files
668 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 will be included (a "full" clone). Otherwise a "narrow" clone containing
669 only the requested files will be performed. If ``storeincludepats`` is not
668 only the requested files will be performed. If ``storeincludepats`` is not
670 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
671 ``path:.``. If both are empty sets, no files will be cloned.
670 ``path:.``. If both are empty sets, no files will be cloned.
672 """
671 """
673
672
674 if isinstance(source, bytes):
673 if isinstance(source, bytes):
675 src = urlutil.get_clone_path(ui, source, branch)
674 src = urlutil.get_clone_path(ui, source, branch)
676 origsource, source, branches = src
675 origsource, source, branches = src
677 srcpeer = peer(ui, peeropts, source)
676 srcpeer = peer(ui, peeropts, source)
678 else:
677 else:
679 srcpeer = source.peer() # in case we were called with a localrepo
678 srcpeer = source.peer() # in case we were called with a localrepo
680 branches = (None, branch or [])
679 branches = (None, branch or [])
681 origsource = source = srcpeer.url()
680 origsource = source = srcpeer.url()
682 srclock = destlock = cleandir = None
681 srclock = destlock = cleandir = None
683 destpeer = None
682 destpeer = None
684 try:
683 try:
685 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
686
685
687 if dest is None:
686 if dest is None:
688 dest = defaultdest(source)
687 dest = defaultdest(source)
689 if dest:
688 if dest:
690 ui.status(_(b"destination directory: %s\n") % dest)
689 ui.status(_(b"destination directory: %s\n") % dest)
691 else:
690 else:
692 dest = urlutil.get_clone_path(ui, dest)[0]
691 dest = urlutil.get_clone_path(ui, dest)[0]
693
692
694 dest = urlutil.urllocalpath(dest)
693 dest = urlutil.urllocalpath(dest)
695 source = urlutil.urllocalpath(source)
694 source = urlutil.urllocalpath(source)
696
695
697 if not dest:
696 if not dest:
698 raise error.InputError(_(b"empty destination path is not valid"))
697 raise error.InputError(_(b"empty destination path is not valid"))
699
698
700 destvfs = vfsmod.vfs(dest, expandpath=True)
699 destvfs = vfsmod.vfs(dest, expandpath=True)
701 if destvfs.lexists():
700 if destvfs.lexists():
702 if not destvfs.isdir():
701 if not destvfs.isdir():
703 raise error.InputError(
702 raise error.InputError(
704 _(b"destination '%s' already exists") % dest
703 _(b"destination '%s' already exists") % dest
705 )
704 )
706 elif destvfs.listdir():
705 elif destvfs.listdir():
707 raise error.InputError(
706 raise error.InputError(
708 _(b"destination '%s' is not empty") % dest
707 _(b"destination '%s' is not empty") % dest
709 )
708 )
710
709
711 createopts = {}
710 createopts = {}
712 narrow = False
711 narrow = False
713
712
714 if storeincludepats is not None:
713 if storeincludepats is not None:
715 narrowspec.validatepatterns(storeincludepats)
714 narrowspec.validatepatterns(storeincludepats)
716 narrow = True
715 narrow = True
717
716
718 if storeexcludepats is not None:
717 if storeexcludepats is not None:
719 narrowspec.validatepatterns(storeexcludepats)
718 narrowspec.validatepatterns(storeexcludepats)
720 narrow = True
719 narrow = True
721
720
722 if narrow:
721 if narrow:
723 # Include everything by default if only exclusion patterns defined.
722 # Include everything by default if only exclusion patterns defined.
724 if storeexcludepats and not storeincludepats:
723 if storeexcludepats and not storeincludepats:
725 storeincludepats = {b'path:.'}
724 storeincludepats = {b'path:.'}
726
725
727 createopts[b'narrowfiles'] = True
726 createopts[b'narrowfiles'] = True
728
727
729 if depth:
728 if depth:
730 createopts[b'shallowfilestore'] = True
729 createopts[b'shallowfilestore'] = True
731
730
732 if srcpeer.capable(b'lfs-serve'):
731 if srcpeer.capable(b'lfs-serve'):
733 # Repository creation honors the config if it disabled the extension, so
732 # Repository creation honors the config if it disabled the extension, so
734 # we can't just announce that lfs will be enabled. This check avoids
733 # we can't just announce that lfs will be enabled. This check avoids
735 # saying that lfs will be enabled, and then saying it's an unknown
734 # saying that lfs will be enabled, and then saying it's an unknown
736 # feature. The lfs creation option is set in either case so that a
735 # feature. The lfs creation option is set in either case so that a
737 # requirement is added. If the extension is explicitly disabled but the
736 # requirement is added. If the extension is explicitly disabled but the
738 # requirement is set, the clone aborts early, before transferring any
737 # requirement is set, the clone aborts early, before transferring any
739 # data.
738 # data.
740 createopts[b'lfs'] = True
739 createopts[b'lfs'] = True
741
740
742 if extensions.disabled_help(b'lfs'):
741 if extensions.disabled_help(b'lfs'):
743 ui.status(
742 ui.status(
744 _(
743 _(
745 b'(remote is using large file support (lfs), but it is '
744 b'(remote is using large file support (lfs), but it is '
746 b'explicitly disabled in the local configuration)\n'
745 b'explicitly disabled in the local configuration)\n'
747 )
746 )
748 )
747 )
749 else:
748 else:
750 ui.status(
749 ui.status(
751 _(
750 _(
752 b'(remote is using large file support (lfs); lfs will '
751 b'(remote is using large file support (lfs); lfs will '
753 b'be enabled for this repository)\n'
752 b'be enabled for this repository)\n'
754 )
753 )
755 )
754 )
756
755
757 shareopts = shareopts or {}
756 shareopts = shareopts or {}
758 sharepool = shareopts.get(b'pool')
757 sharepool = shareopts.get(b'pool')
759 sharenamemode = shareopts.get(b'mode')
758 sharenamemode = shareopts.get(b'mode')
760 if sharepool and islocal(dest):
759 if sharepool and islocal(dest):
761 sharepath = None
760 sharepath = None
762 if sharenamemode == b'identity':
761 if sharenamemode == b'identity':
763 # Resolve the name from the initial changeset in the remote
762 # Resolve the name from the initial changeset in the remote
764 # repository. This returns nullid when the remote is empty. It
763 # repository. This returns nullid when the remote is empty. It
765 # raises RepoLookupError if revision 0 is filtered or otherwise
764 # raises RepoLookupError if revision 0 is filtered or otherwise
766 # not available. If we fail to resolve, sharing is not enabled.
765 # not available. If we fail to resolve, sharing is not enabled.
767 try:
766 try:
768 with srcpeer.commandexecutor() as e:
767 with srcpeer.commandexecutor() as e:
769 rootnode = e.callcommand(
768 rootnode = e.callcommand(
770 b'lookup',
769 b'lookup',
771 {
770 {
772 b'key': b'0',
771 b'key': b'0',
773 },
772 },
774 ).result()
773 ).result()
775
774
776 if rootnode != nullid:
775 if rootnode != nullid:
777 sharepath = os.path.join(sharepool, hex(rootnode))
776 sharepath = os.path.join(sharepool, hex(rootnode))
778 else:
777 else:
779 ui.status(
778 ui.status(
780 _(
779 _(
781 b'(not using pooled storage: '
780 b'(not using pooled storage: '
782 b'remote appears to be empty)\n'
781 b'remote appears to be empty)\n'
783 )
782 )
784 )
783 )
785 except error.RepoLookupError:
784 except error.RepoLookupError:
786 ui.status(
785 ui.status(
787 _(
786 _(
788 b'(not using pooled storage: '
787 b'(not using pooled storage: '
789 b'unable to resolve identity of remote)\n'
788 b'unable to resolve identity of remote)\n'
790 )
789 )
791 )
790 )
792 elif sharenamemode == b'remote':
791 elif sharenamemode == b'remote':
793 sharepath = os.path.join(
792 sharepath = os.path.join(
794 sharepool, hex(hashutil.sha1(source).digest())
793 sharepool, hex(hashutil.sha1(source).digest())
795 )
794 )
796 else:
795 else:
797 raise error.Abort(
796 raise error.Abort(
798 _(b'unknown share naming mode: %s') % sharenamemode
797 _(b'unknown share naming mode: %s') % sharenamemode
799 )
798 )
800
799
801 # TODO this is a somewhat arbitrary restriction.
800 # TODO this is a somewhat arbitrary restriction.
802 if narrow:
801 if narrow:
803 ui.status(
802 ui.status(
804 _(b'(pooled storage not supported for narrow clones)\n')
803 _(b'(pooled storage not supported for narrow clones)\n')
805 )
804 )
806 sharepath = None
805 sharepath = None
807
806
808 if sharepath:
807 if sharepath:
809 return clonewithshare(
808 return clonewithshare(
810 ui,
809 ui,
811 peeropts,
810 peeropts,
812 sharepath,
811 sharepath,
813 source,
812 source,
814 srcpeer,
813 srcpeer,
815 dest,
814 dest,
816 pull=pull,
815 pull=pull,
817 rev=revs,
816 rev=revs,
818 update=update,
817 update=update,
819 stream=stream,
818 stream=stream,
820 )
819 )
821
820
822 srcrepo = srcpeer.local()
821 srcrepo = srcpeer.local()
823
822
824 abspath = origsource
823 abspath = origsource
825 if islocal(origsource):
824 if islocal(origsource):
826 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
825 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
827
826
828 if islocal(dest):
827 if islocal(dest):
829 cleandir = dest
828 cleandir = dest
830
829
831 copy = False
830 copy = False
832 if (
831 if (
833 srcrepo
832 srcrepo
834 and srcrepo.cancopy()
833 and srcrepo.cancopy()
835 and islocal(dest)
834 and islocal(dest)
836 and not phases.hassecret(srcrepo)
835 and not phases.hassecret(srcrepo)
837 ):
836 ):
838 copy = not pull and not revs
837 copy = not pull and not revs
839
838
840 # TODO this is a somewhat arbitrary restriction.
839 # TODO this is a somewhat arbitrary restriction.
841 if narrow:
840 if narrow:
842 copy = False
841 copy = False
843
842
844 if copy:
843 if copy:
845 try:
844 try:
846 # we use a lock here because if we race with commit, we
845 # we use a lock here because if we race with commit, we
847 # can end up with extra data in the cloned revlogs that's
846 # can end up with extra data in the cloned revlogs that's
848 # not pointed to by changesets, thus causing verify to
847 # not pointed to by changesets, thus causing verify to
849 # fail
848 # fail
850 srclock = srcrepo.lock(wait=False)
849 srclock = srcrepo.lock(wait=False)
851 except error.LockError:
850 except error.LockError:
852 copy = False
851 copy = False
853
852
854 if copy:
853 if copy:
855 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
854 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
856 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
855 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
857 if not os.path.exists(dest):
856 if not os.path.exists(dest):
858 util.makedirs(dest)
857 util.makedirs(dest)
859 else:
858 else:
860 # only clean up directories we create ourselves
859 # only clean up directories we create ourselves
861 cleandir = hgdir
860 cleandir = hgdir
862 try:
861 try:
863 destpath = hgdir
862 destpath = hgdir
864 util.makedir(destpath, notindexed=True)
863 util.makedir(destpath, notindexed=True)
865 except OSError as inst:
864 except OSError as inst:
866 if inst.errno == errno.EEXIST:
865 if inst.errno == errno.EEXIST:
867 cleandir = None
866 cleandir = None
868 raise error.Abort(
867 raise error.Abort(
869 _(b"destination '%s' already exists") % dest
868 _(b"destination '%s' already exists") % dest
870 )
869 )
871 raise
870 raise
872
871
873 destlock = copystore(ui, srcrepo, destpath)
872 destlock = copystore(ui, srcrepo, destpath)
874 # copy bookmarks over
873 # copy bookmarks over
875 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
874 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
876 dstbookmarks = os.path.join(destpath, b'bookmarks')
875 dstbookmarks = os.path.join(destpath, b'bookmarks')
877 if os.path.exists(srcbookmarks):
876 if os.path.exists(srcbookmarks):
878 util.copyfile(srcbookmarks, dstbookmarks)
877 util.copyfile(srcbookmarks, dstbookmarks)
879
878
880 dstcachedir = os.path.join(destpath, b'cache')
879 dstcachedir = os.path.join(destpath, b'cache')
881 for cache in cacheutil.cachetocopy(srcrepo):
880 for cache in cacheutil.cachetocopy(srcrepo):
882 _copycache(srcrepo, dstcachedir, cache)
881 _copycache(srcrepo, dstcachedir, cache)
883
882
884 # we need to re-init the repo after manually copying the data
883 # we need to re-init the repo after manually copying the data
885 # into it
884 # into it
886 destpeer = peer(srcrepo, peeropts, dest)
885 destpeer = peer(srcrepo, peeropts, dest)
887 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
886 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
888 else:
887 else:
889 try:
888 try:
890 # only pass ui when no srcrepo
889 # only pass ui when no srcrepo
891 destpeer = peer(
890 destpeer = peer(
892 srcrepo or ui,
891 srcrepo or ui,
893 peeropts,
892 peeropts,
894 dest,
893 dest,
895 create=True,
894 create=True,
896 createopts=createopts,
895 createopts=createopts,
897 )
896 )
898 except OSError as inst:
897 except OSError as inst:
899 if inst.errno == errno.EEXIST:
898 if inst.errno == errno.EEXIST:
900 cleandir = None
899 cleandir = None
901 raise error.Abort(
900 raise error.Abort(
902 _(b"destination '%s' already exists") % dest
901 _(b"destination '%s' already exists") % dest
903 )
902 )
904 raise
903 raise
905
904
906 if revs:
905 if revs:
907 if not srcpeer.capable(b'lookup'):
906 if not srcpeer.capable(b'lookup'):
908 raise error.Abort(
907 raise error.Abort(
909 _(
908 _(
910 b"src repository does not support "
909 b"src repository does not support "
911 b"revision lookup and so doesn't "
910 b"revision lookup and so doesn't "
912 b"support clone by revision"
911 b"support clone by revision"
913 )
912 )
914 )
913 )
915
914
916 # TODO this is batchable.
915 # TODO this is batchable.
917 remoterevs = []
916 remoterevs = []
918 for rev in revs:
917 for rev in revs:
919 with srcpeer.commandexecutor() as e:
918 with srcpeer.commandexecutor() as e:
920 remoterevs.append(
919 remoterevs.append(
921 e.callcommand(
920 e.callcommand(
922 b'lookup',
921 b'lookup',
923 {
922 {
924 b'key': rev,
923 b'key': rev,
925 },
924 },
926 ).result()
925 ).result()
927 )
926 )
928 revs = remoterevs
927 revs = remoterevs
929
928
930 checkout = revs[0]
929 checkout = revs[0]
931 else:
930 else:
932 revs = None
931 revs = None
933 local = destpeer.local()
932 local = destpeer.local()
934 if local:
933 if local:
935 if narrow:
934 if narrow:
936 with local.wlock(), local.lock():
935 with local.wlock(), local.lock():
937 local.setnarrowpats(storeincludepats, storeexcludepats)
936 local.setnarrowpats(storeincludepats, storeexcludepats)
938 narrowspec.copytoworkingcopy(local)
937 narrowspec.copytoworkingcopy(local)
939
938
940 u = urlutil.url(abspath)
939 u = urlutil.url(abspath)
941 defaulturl = bytes(u)
940 defaulturl = bytes(u)
942 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
941 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
943 if not stream:
942 if not stream:
944 if pull:
943 if pull:
945 stream = False
944 stream = False
946 else:
945 else:
947 stream = None
946 stream = None
948 # internal config: ui.quietbookmarkmove
947 # internal config: ui.quietbookmarkmove
949 overrides = {(b'ui', b'quietbookmarkmove'): True}
948 overrides = {(b'ui', b'quietbookmarkmove'): True}
950 with local.ui.configoverride(overrides, b'clone'):
949 with local.ui.configoverride(overrides, b'clone'):
951 exchange.pull(
950 exchange.pull(
952 local,
951 local,
953 srcpeer,
952 srcpeer,
954 revs,
953 revs,
955 streamclonerequested=stream,
954 streamclonerequested=stream,
956 includepats=storeincludepats,
955 includepats=storeincludepats,
957 excludepats=storeexcludepats,
956 excludepats=storeexcludepats,
958 depth=depth,
957 depth=depth,
959 )
958 )
960 elif srcrepo:
959 elif srcrepo:
961 # TODO lift restriction once exchange.push() accepts narrow
960 # TODO lift restriction once exchange.push() accepts narrow
962 # push.
961 # push.
963 if narrow:
962 if narrow:
964 raise error.Abort(
963 raise error.Abort(
965 _(
964 _(
966 b'narrow clone not available for '
965 b'narrow clone not available for '
967 b'remote destinations'
966 b'remote destinations'
968 )
967 )
969 )
968 )
970
969
971 exchange.push(
970 exchange.push(
972 srcrepo,
971 srcrepo,
973 destpeer,
972 destpeer,
974 revs=revs,
973 revs=revs,
975 bookmarks=srcrepo._bookmarks.keys(),
974 bookmarks=srcrepo._bookmarks.keys(),
976 )
975 )
977 else:
976 else:
978 raise error.Abort(
977 raise error.Abort(
979 _(b"clone from remote to remote not supported")
978 _(b"clone from remote to remote not supported")
980 )
979 )
981
980
982 cleandir = None
981 cleandir = None
983
982
984 destrepo = destpeer.local()
983 destrepo = destpeer.local()
985 if destrepo:
984 if destrepo:
986 template = uimod.samplehgrcs[b'cloned']
985 template = uimod.samplehgrcs[b'cloned']
987 u = urlutil.url(abspath)
986 u = urlutil.url(abspath)
988 u.passwd = None
987 u.passwd = None
989 defaulturl = bytes(u)
988 defaulturl = bytes(u)
990 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
989 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
991 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
990 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
992
991
993 if ui.configbool(b'experimental', b'remotenames'):
992 if ui.configbool(b'experimental', b'remotenames'):
994 logexchange.pullremotenames(destrepo, srcpeer)
993 logexchange.pullremotenames(destrepo, srcpeer)
995
994
996 if update:
995 if update:
997 if update is not True:
996 if update is not True:
998 with srcpeer.commandexecutor() as e:
997 with srcpeer.commandexecutor() as e:
999 checkout = e.callcommand(
998 checkout = e.callcommand(
1000 b'lookup',
999 b'lookup',
1001 {
1000 {
1002 b'key': update,
1001 b'key': update,
1003 },
1002 },
1004 ).result()
1003 ).result()
1005
1004
1006 uprev = None
1005 uprev = None
1007 status = None
1006 status = None
1008 if checkout is not None:
1007 if checkout is not None:
1009 # Some extensions (at least hg-git and hg-subversion) have
1008 # Some extensions (at least hg-git and hg-subversion) have
1010 # a peer.lookup() implementation that returns a name instead
1009 # a peer.lookup() implementation that returns a name instead
1011 # of a nodeid. We work around it here until we've figured
1010 # of a nodeid. We work around it here until we've figured
1012 # out a better solution.
1011 # out a better solution.
1013 if len(checkout) == 20 and checkout in destrepo:
1012 if len(checkout) == 20 and checkout in destrepo:
1014 uprev = checkout
1013 uprev = checkout
1015 elif scmutil.isrevsymbol(destrepo, checkout):
1014 elif scmutil.isrevsymbol(destrepo, checkout):
1016 uprev = scmutil.revsymbol(destrepo, checkout).node()
1015 uprev = scmutil.revsymbol(destrepo, checkout).node()
1017 else:
1016 else:
1018 if update is not True:
1017 if update is not True:
1019 try:
1018 try:
1020 uprev = destrepo.lookup(update)
1019 uprev = destrepo.lookup(update)
1021 except error.RepoLookupError:
1020 except error.RepoLookupError:
1022 pass
1021 pass
1023 if uprev is None:
1022 if uprev is None:
1024 try:
1023 try:
1025 if destrepo._activebookmark:
1024 if destrepo._activebookmark:
1026 uprev = destrepo.lookup(destrepo._activebookmark)
1025 uprev = destrepo.lookup(destrepo._activebookmark)
1027 update = destrepo._activebookmark
1026 update = destrepo._activebookmark
1028 else:
1027 else:
1029 uprev = destrepo._bookmarks[b'@']
1028 uprev = destrepo._bookmarks[b'@']
1030 update = b'@'
1029 update = b'@'
1031 bn = destrepo[uprev].branch()
1030 bn = destrepo[uprev].branch()
1032 if bn == b'default':
1031 if bn == b'default':
1033 status = _(b"updating to bookmark %s\n" % update)
1032 status = _(b"updating to bookmark %s\n" % update)
1034 else:
1033 else:
1035 status = (
1034 status = (
1036 _(b"updating to bookmark %s on branch %s\n")
1035 _(b"updating to bookmark %s on branch %s\n")
1037 ) % (update, bn)
1036 ) % (update, bn)
1038 except KeyError:
1037 except KeyError:
1039 try:
1038 try:
1040 uprev = destrepo.branchtip(b'default')
1039 uprev = destrepo.branchtip(b'default')
1041 except error.RepoLookupError:
1040 except error.RepoLookupError:
1042 uprev = destrepo.lookup(b'tip')
1041 uprev = destrepo.lookup(b'tip')
1043 if not status:
1042 if not status:
1044 bn = destrepo[uprev].branch()
1043 bn = destrepo[uprev].branch()
1045 status = _(b"updating to branch %s\n") % bn
1044 status = _(b"updating to branch %s\n") % bn
1046 destrepo.ui.status(status)
1045 destrepo.ui.status(status)
1047 _update(destrepo, uprev)
1046 _update(destrepo, uprev)
1048 if update in destrepo._bookmarks:
1047 if update in destrepo._bookmarks:
1049 bookmarks.activate(destrepo, update)
1048 bookmarks.activate(destrepo, update)
1050 if destlock is not None:
1049 if destlock is not None:
1051 release(destlock)
1050 release(destlock)
1052 # here is a tiny windows were someone could end up writing the
1051 # here is a tiny windows were someone could end up writing the
1053 # repository before the cache are sure to be warm. This is "fine"
1052 # repository before the cache are sure to be warm. This is "fine"
1054 # as the only "bad" outcome would be some slowness. That potential
1053 # as the only "bad" outcome would be some slowness. That potential
1055 # slowness already affect reader.
1054 # slowness already affect reader.
1056 with destrepo.lock():
1055 with destrepo.lock():
1057 destrepo.updatecaches(full=True)
1056 destrepo.updatecaches(full=True)
1058 finally:
1057 finally:
1059 release(srclock, destlock)
1058 release(srclock, destlock)
1060 if cleandir is not None:
1059 if cleandir is not None:
1061 shutil.rmtree(cleandir, True)
1060 shutil.rmtree(cleandir, True)
1062 if srcpeer is not None:
1061 if srcpeer is not None:
1063 srcpeer.close()
1062 srcpeer.close()
1064 if destpeer and destpeer.local() is None:
1063 if destpeer and destpeer.local() is None:
1065 destpeer.close()
1064 destpeer.close()
1066 return srcpeer, destpeer
1065 return srcpeer, destpeer
1067
1066
1068
1067
1069 def _showstats(repo, stats, quietempty=False):
1068 def _showstats(repo, stats, quietempty=False):
1070 if quietempty and stats.isempty():
1069 if quietempty and stats.isempty():
1071 return
1070 return
1072 repo.ui.status(
1071 repo.ui.status(
1073 _(
1072 _(
1074 b"%d files updated, %d files merged, "
1073 b"%d files updated, %d files merged, "
1075 b"%d files removed, %d files unresolved\n"
1074 b"%d files removed, %d files unresolved\n"
1076 )
1075 )
1077 % (
1076 % (
1078 stats.updatedcount,
1077 stats.updatedcount,
1079 stats.mergedcount,
1078 stats.mergedcount,
1080 stats.removedcount,
1079 stats.removedcount,
1081 stats.unresolvedcount,
1080 stats.unresolvedcount,
1082 )
1081 )
1083 )
1082 )
1084
1083
1085
1084
1086 def updaterepo(repo, node, overwrite, updatecheck=None):
1085 def updaterepo(repo, node, overwrite, updatecheck=None):
1087 """Update the working directory to node.
1086 """Update the working directory to node.
1088
1087
1089 When overwrite is set, changes are clobbered, merged else
1088 When overwrite is set, changes are clobbered, merged else
1090
1089
1091 returns stats (see pydoc mercurial.merge.applyupdates)"""
1090 returns stats (see pydoc mercurial.merge.applyupdates)"""
1092 repo.ui.deprecwarn(
1091 repo.ui.deprecwarn(
1093 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1092 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1094 b'5.7',
1093 b'5.7',
1095 )
1094 )
1096 return mergemod._update(
1095 return mergemod._update(
1097 repo,
1096 repo,
1098 node,
1097 node,
1099 branchmerge=False,
1098 branchmerge=False,
1100 force=overwrite,
1099 force=overwrite,
1101 labels=[b'working copy', b'destination'],
1100 labels=[b'working copy', b'destination'],
1102 updatecheck=updatecheck,
1101 updatecheck=updatecheck,
1103 )
1102 )
1104
1103
1105
1104
1106 def update(repo, node, quietempty=False, updatecheck=None):
1105 def update(repo, node, quietempty=False, updatecheck=None):
1107 """update the working directory to node"""
1106 """update the working directory to node"""
1108 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1107 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1109 _showstats(repo, stats, quietempty)
1108 _showstats(repo, stats, quietempty)
1110 if stats.unresolvedcount:
1109 if stats.unresolvedcount:
1111 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1110 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1112 return stats.unresolvedcount > 0
1111 return stats.unresolvedcount > 0
1113
1112
1114
1113
1115 # naming conflict in clone()
1114 # naming conflict in clone()
1116 _update = update
1115 _update = update
1117
1116
1118
1117
1119 def clean(repo, node, show_stats=True, quietempty=False):
1118 def clean(repo, node, show_stats=True, quietempty=False):
1120 """forcibly switch the working directory to node, clobbering changes"""
1119 """forcibly switch the working directory to node, clobbering changes"""
1121 stats = mergemod.clean_update(repo[node])
1120 stats = mergemod.clean_update(repo[node])
1122 assert stats.unresolvedcount == 0
1121 assert stats.unresolvedcount == 0
1123 if show_stats:
1122 if show_stats:
1124 _showstats(repo, stats, quietempty)
1123 _showstats(repo, stats, quietempty)
1125 return False
1124 return False
1126
1125
1127
1126
1128 # naming conflict in updatetotally()
1127 # naming conflict in updatetotally()
1129 _clean = clean
1128 _clean = clean
1130
1129
1131 _VALID_UPDATECHECKS = {
1130 _VALID_UPDATECHECKS = {
1132 mergemod.UPDATECHECK_ABORT,
1131 mergemod.UPDATECHECK_ABORT,
1133 mergemod.UPDATECHECK_NONE,
1132 mergemod.UPDATECHECK_NONE,
1134 mergemod.UPDATECHECK_LINEAR,
1133 mergemod.UPDATECHECK_LINEAR,
1135 mergemod.UPDATECHECK_NO_CONFLICT,
1134 mergemod.UPDATECHECK_NO_CONFLICT,
1136 }
1135 }
1137
1136
1138
1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # not configured, or an invalid value was configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument must be one of the known constants
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move / activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now on a dirty wdir, then fall through with the
                # cheap NONE policy since the check already happened
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                bm = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % bm)
            else:
                # this can happen with a non-linear update
                bm = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % bm)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                bm = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % bm)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: drop any active one
            if repo._activebookmark:
                bm = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % bm)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1217
1216
1218
1217
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        # tell the user how to continue or back out of the merge
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1240
1239
1241
1240
def abortmerge(ui, repo):
    """abandon an in-progress merge, updating back to the first parent"""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts: go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot produce unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1255
1254
1256
1255
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        # BUG FIX: the message must be bytes — Mercurial's i18n `_()` and
        # error.Abort operate on bytes on Python 3; the previous unicode
        # literal broke the `%` formatting / abort path.
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # restrict the operation to a subrepository path; absolute subpaths
        # replace the source entirely, relative ones are joined onto it
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` with a bundle repo and gives
        # us a new cleanup callable that also removes the temporary bundle
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1313
1312
1314
1313
def incoming(ui, repo, source, opts, subpath=None):
    """show changesets that would be pulled from `source` (exit 0 if any)"""

    def subreporecurse():
        # recurse into subrepos when requested; 0 wins (changes found)
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(node) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1342
1341
1343
1342
def _outgoing(ui, repo, dests, opts, subpath=None):
    """find changesets missing from every destination in `dests`

    Returns (outgoing_revs, others): the union of missing nodes sorted by
    local revision number, and the list of peers that must be closed by the
    caller.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): `subpath` is rebound to a url object on the first
            # iteration and reused for later destinations — presumably
            # harmless, but worth confirming with multiple push paths.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1382
1381
1383
1382
def _outgoing_recurse(ui, repo, dests, opts):
    """run `outgoing` on every subrepo when --subrepos is set; 0 wins"""
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1392
1391
1393
1392
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream everything through unchanged
        for node in revs:
            yield node
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1415
1414
1416
1415
def outgoing(ui, repo, dests, opts, subpath=None):
    """show changesets not found in the destination(s); exit 0 if any"""
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # ASCII-graph rendering requires a buffered displayer
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # peers opened by _outgoing are our responsibility to close
        for oth in others:
            oth.close()
1448
1447
1449
1448
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any earlier failure sticky in `ret`
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1482
1481
1483
1482
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1512
1511
1513
1512
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1523
1522
1524
1523
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # nothing observable changed: keep serving the cached instance
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Snapshot (mtime, size) of the files of interest.

        Returns (state_tuple, newest_mtime); missing files fall back to
        stat()ing their containing directory.
        """
        entries = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # file absent (yet): track the directory instead
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(entries), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        dup = cachedlocalrepo(fresh)
        # share the observed state so the copy does not refresh spuriously
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
1596 return c
General Comments 0
You need to be logged in to leave comments. Login now