##// END OF EJS Templates
clone: cleanup the "cleanup dir" logic used during local clone...
marmoute -
r48209:1c7f3d91 default
parent child Browse files
Show More
@@ -1,1600 +1,1602 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cacheutil,
27 cacheutil,
28 cmdutil,
28 cmdutil,
29 destutil,
29 destutil,
30 discovery,
30 discovery,
31 error,
31 error,
32 exchange,
32 exchange,
33 extensions,
33 extensions,
34 graphmod,
34 graphmod,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 requirements,
44 requirements,
45 scmutil,
45 scmutil,
46 sshpeer,
46 sshpeer,
47 statichttprepo,
47 statichttprepo,
48 ui as uimod,
48 ui as uimod,
49 unionrepo,
49 unionrepo,
50 url,
50 url,
51 util,
51 util,
52 verify as verifymod,
52 verify as verifymod,
53 vfs as vfsmod,
53 vfs as vfsmod,
54 )
54 )
55 from .interfaces import repository as repositorymod
55 from .interfaces import repository as repositorymod
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
# convenience alias; used by copystore() below to release a destination lock
release = lock.release

# shared features
# name of the feature flag written to .hg/shared when bookmarks are shared
sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repository module to use for a local ``path``.

    ``bundlerepo`` is returned when ``path`` points at a regular file
    (i.e. a bundle file); ``localrepo`` otherwise.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # path does not exist (yet); treat it as a local repository target
        isfile = False

    # conditional expression instead of the fragile `x and a or b` idiom
    # (equivalent here because both modules are truthy)
    return bundlerepo if isfile else localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from ``branches`` against ``other``.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by url
    parsing.  Returns ``(revs, checkout)`` where ``checkout`` is the first
    resolved revision or ``None`` when nothing was requested.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        if revs:
            return revs, revs[0]
        return revs or None, None
    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' means the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(node) for node in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a known branch name: treat it as a raw hash/revision
        revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated compatibility shim: forwards to
    ``mercurial.utils.urlutil.parseurl`` after emitting a warning.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# Map URL scheme -> module (or factory) implementing that repository type.
# b'file' goes through _local() to choose between bundle and local repos.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the scheme handler (module or repo object) for ``path``.

    Unknown schemes fall back to the b'file' handler.
    """
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because 'handler' can be an
        # unloaded module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
164
164
165
165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal() -> not a local repository type
        return False
174
174
175
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization
# (extensions may append to this; run by _peerorrepo for non-local peers)
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs ``presetupfuncs`` and every extension's ``reposetup`` hook on the
    new object; wire peers additionally get ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for setup in presetupfuncs or []:
        setup(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: give registered hooks a chance to decorate it
        for setup in wirepeersetupfuncs:
            setup(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when ``path`` resolves to a non-local peer.  The returned repo
    is filtered to the b'visible' view.
    """
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if repo:
        return repo.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or peer.url())
    )
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # normpath strips trailing slashes so b'/foo/' -> b'foo'
    return os.path.basename(os.path.normpath(path)) if path else b''
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # reuse a previously resolved source repo when available
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent calls
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if dest:
        dest = urlutil.get_clone_path(ui, dest)[1]
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    createopts = {
        b'sharedrepo': srcrepo,
        b'sharedrelative': relative,
        b'shareditems': shareditems,
    }
    r = repository(ui, dest, create=True, createopts=createopts)

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
337
338
338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    # keep whatever config the destination already had; it goes last so the
    # local settings win over the copied source settings
    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore may return None (no lock taken); nullcontextmanager
        # keeps the `with` statement valid in that case
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer file around (as sharedpath.old) rather
            # than deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # seed the new repo's hgrc with a default path back to the source
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested; it takes priority
        checkout = update
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unset; util.copyfiles decides (and reports back)
        # whether hardlinking is possible on the first copy
        hardlink = None
        # NOTE(review): hardlink is always None here, so topic is always
        # b'copying' at this point — confirm whether that is intended
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos have no secret changesets; phase data
                # need not be copied
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        # the caller is responsible for releasing destlock
        return destlock
    except:  # re-raises
        # release the destination lock before propagating any error
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    NOTE(review): the ``pull`` parameter is not used in this body (the inner
    clone always passes pull=True) — confirm whether it is kept for interface
    compatibility with clone().
    """
    # translate user-supplied revision identifiers into remote nodes
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
597 # Recomputing caches is often slow on big repos, so copy them.
597 # Recomputing caches is often slow on big repos, so copy them.
598 def _copycache(srcrepo, dstcachedir, fname):
598 def _copycache(srcrepo, dstcachedir, fname):
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 srcfname = srcrepo.cachevfs.join(fname)
600 srcfname = srcrepo.cachevfs.join(fname)
601 dstfname = os.path.join(dstcachedir, fname)
601 dstfname = os.path.join(dstcachedir, fname)
602 if os.path.exists(srcfname):
602 if os.path.exists(srcfname):
603 if not os.path.exists(dstcachedir):
603 if not os.path.exists(dstcachedir):
604 os.mkdir(dstcachedir)
604 os.mkdir(dstcachedir)
605 util.copyfile(srcfname, dstfname)
605 util.copyfile(srcfname, dstfname)
606
606
607
607
608 def clone(
608 def clone(
609 ui,
609 ui,
610 peeropts,
610 peeropts,
611 source,
611 source,
612 dest=None,
612 dest=None,
613 pull=False,
613 pull=False,
614 revs=None,
614 revs=None,
615 update=True,
615 update=True,
616 stream=False,
616 stream=False,
617 branch=None,
617 branch=None,
618 shareopts=None,
618 shareopts=None,
619 storeincludepats=None,
619 storeincludepats=None,
620 storeexcludepats=None,
620 storeexcludepats=None,
621 depth=None,
621 depth=None,
622 ):
622 ):
623 """Make a copy of an existing repository.
623 """Make a copy of an existing repository.
624
624
625 Create a copy of an existing repository in a new directory. The
625 Create a copy of an existing repository in a new directory. The
626 source and destination are URLs, as passed to the repository
626 source and destination are URLs, as passed to the repository
627 function. Returns a pair of repository peers, the source and
627 function. Returns a pair of repository peers, the source and
628 newly created destination.
628 newly created destination.
629
629
630 The location of the source is added to the new repository's
630 The location of the source is added to the new repository's
631 .hg/hgrc file, as the default to be used for future pulls and
631 .hg/hgrc file, as the default to be used for future pulls and
632 pushes.
632 pushes.
633
633
634 If an exception is raised, the partly cloned/updated destination
634 If an exception is raised, the partly cloned/updated destination
635 repository will be deleted.
635 repository will be deleted.
636
636
637 Arguments:
637 Arguments:
638
638
639 source: repository object or URL
639 source: repository object or URL
640
640
641 dest: URL of destination repository to create (defaults to base
641 dest: URL of destination repository to create (defaults to base
642 name of source repository)
642 name of source repository)
643
643
644 pull: always pull from source repository, even in local case or if the
644 pull: always pull from source repository, even in local case or if the
645 server prefers streaming
645 server prefers streaming
646
646
647 stream: stream raw data uncompressed from repository (fast over
647 stream: stream raw data uncompressed from repository (fast over
648 LAN, slow over WAN)
648 LAN, slow over WAN)
649
649
650 revs: revision to clone up to (implies pull=True)
650 revs: revision to clone up to (implies pull=True)
651
651
652 update: update working directory after clone completes, if
652 update: update working directory after clone completes, if
653 destination is local repository (True means update to default rev,
653 destination is local repository (True means update to default rev,
654 anything else is treated as a revision)
654 anything else is treated as a revision)
655
655
656 branch: branches to clone
656 branch: branches to clone
657
657
658 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 shareopts: dict of options to control auto sharing behavior. The "pool" key
659 activates auto sharing mode and defines the directory for stores. The
659 activates auto sharing mode and defines the directory for stores. The
660 "mode" key determines how to construct the directory name of the shared
660 "mode" key determines how to construct the directory name of the shared
661 repository. "identity" means the name is derived from the node of the first
661 repository. "identity" means the name is derived from the node of the first
662 changeset in the repository. "remote" means the name is derived from the
662 changeset in the repository. "remote" means the name is derived from the
663 remote's path/URL. Defaults to "identity."
663 remote's path/URL. Defaults to "identity."
664
664
665 storeincludepats and storeexcludepats: sets of file patterns to include and
665 storeincludepats and storeexcludepats: sets of file patterns to include and
666 exclude in the repository copy, respectively. If not defined, all files
666 exclude in the repository copy, respectively. If not defined, all files
667 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 will be included (a "full" clone). Otherwise a "narrow" clone containing
668 only the requested files will be performed. If ``storeincludepats`` is not
668 only the requested files will be performed. If ``storeincludepats`` is not
669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
670 ``path:.``. If both are empty sets, no files will be cloned.
670 ``path:.``. If both are empty sets, no files will be cloned.
671 """
671 """
672
672
673 if isinstance(source, bytes):
673 if isinstance(source, bytes):
674 src = urlutil.get_clone_path(ui, source, branch)
674 src = urlutil.get_clone_path(ui, source, branch)
675 origsource, source, branches = src
675 origsource, source, branches = src
676 srcpeer = peer(ui, peeropts, source)
676 srcpeer = peer(ui, peeropts, source)
677 else:
677 else:
678 srcpeer = source.peer() # in case we were called with a localrepo
678 srcpeer = source.peer() # in case we were called with a localrepo
679 branches = (None, branch or [])
679 branches = (None, branch or [])
680 origsource = source = srcpeer.url()
680 origsource = source = srcpeer.url()
681 srclock = destlock = cleandir = None
681 srclock = destlock = cleandir = None
682 destpeer = None
682 destpeer = None
683 try:
683 try:
684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
685
685
686 if dest is None:
686 if dest is None:
687 dest = defaultdest(source)
687 dest = defaultdest(source)
688 if dest:
688 if dest:
689 ui.status(_(b"destination directory: %s\n") % dest)
689 ui.status(_(b"destination directory: %s\n") % dest)
690 else:
690 else:
691 dest = urlutil.get_clone_path(ui, dest)[0]
691 dest = urlutil.get_clone_path(ui, dest)[0]
692
692
693 dest = urlutil.urllocalpath(dest)
693 dest = urlutil.urllocalpath(dest)
694 source = urlutil.urllocalpath(source)
694 source = urlutil.urllocalpath(source)
695
695
696 if not dest:
696 if not dest:
697 raise error.InputError(_(b"empty destination path is not valid"))
697 raise error.InputError(_(b"empty destination path is not valid"))
698
698
699 destvfs = vfsmod.vfs(dest, expandpath=True)
699 destvfs = vfsmod.vfs(dest, expandpath=True)
700 if destvfs.lexists():
700 if destvfs.lexists():
701 if not destvfs.isdir():
701 if not destvfs.isdir():
702 raise error.InputError(
702 raise error.InputError(
703 _(b"destination '%s' already exists") % dest
703 _(b"destination '%s' already exists") % dest
704 )
704 )
705 elif destvfs.listdir():
705 elif destvfs.listdir():
706 raise error.InputError(
706 raise error.InputError(
707 _(b"destination '%s' is not empty") % dest
707 _(b"destination '%s' is not empty") % dest
708 )
708 )
709
709
710 createopts = {}
710 createopts = {}
711 narrow = False
711 narrow = False
712
712
713 if storeincludepats is not None:
713 if storeincludepats is not None:
714 narrowspec.validatepatterns(storeincludepats)
714 narrowspec.validatepatterns(storeincludepats)
715 narrow = True
715 narrow = True
716
716
717 if storeexcludepats is not None:
717 if storeexcludepats is not None:
718 narrowspec.validatepatterns(storeexcludepats)
718 narrowspec.validatepatterns(storeexcludepats)
719 narrow = True
719 narrow = True
720
720
721 if narrow:
721 if narrow:
722 # Include everything by default if only exclusion patterns defined.
722 # Include everything by default if only exclusion patterns defined.
723 if storeexcludepats and not storeincludepats:
723 if storeexcludepats and not storeincludepats:
724 storeincludepats = {b'path:.'}
724 storeincludepats = {b'path:.'}
725
725
726 createopts[b'narrowfiles'] = True
726 createopts[b'narrowfiles'] = True
727
727
728 if depth:
728 if depth:
729 createopts[b'shallowfilestore'] = True
729 createopts[b'shallowfilestore'] = True
730
730
731 if srcpeer.capable(b'lfs-serve'):
731 if srcpeer.capable(b'lfs-serve'):
732 # Repository creation honors the config if it disabled the extension, so
732 # Repository creation honors the config if it disabled the extension, so
733 # we can't just announce that lfs will be enabled. This check avoids
733 # we can't just announce that lfs will be enabled. This check avoids
734 # saying that lfs will be enabled, and then saying it's an unknown
734 # saying that lfs will be enabled, and then saying it's an unknown
735 # feature. The lfs creation option is set in either case so that a
735 # feature. The lfs creation option is set in either case so that a
736 # requirement is added. If the extension is explicitly disabled but the
736 # requirement is added. If the extension is explicitly disabled but the
737 # requirement is set, the clone aborts early, before transferring any
737 # requirement is set, the clone aborts early, before transferring any
738 # data.
738 # data.
739 createopts[b'lfs'] = True
739 createopts[b'lfs'] = True
740
740
741 if extensions.disabled_help(b'lfs'):
741 if extensions.disabled_help(b'lfs'):
742 ui.status(
742 ui.status(
743 _(
743 _(
744 b'(remote is using large file support (lfs), but it is '
744 b'(remote is using large file support (lfs), but it is '
745 b'explicitly disabled in the local configuration)\n'
745 b'explicitly disabled in the local configuration)\n'
746 )
746 )
747 )
747 )
748 else:
748 else:
749 ui.status(
749 ui.status(
750 _(
750 _(
751 b'(remote is using large file support (lfs); lfs will '
751 b'(remote is using large file support (lfs); lfs will '
752 b'be enabled for this repository)\n'
752 b'be enabled for this repository)\n'
753 )
753 )
754 )
754 )
755
755
756 shareopts = shareopts or {}
756 shareopts = shareopts or {}
757 sharepool = shareopts.get(b'pool')
757 sharepool = shareopts.get(b'pool')
758 sharenamemode = shareopts.get(b'mode')
758 sharenamemode = shareopts.get(b'mode')
759 if sharepool and islocal(dest):
759 if sharepool and islocal(dest):
760 sharepath = None
760 sharepath = None
761 if sharenamemode == b'identity':
761 if sharenamemode == b'identity':
762 # Resolve the name from the initial changeset in the remote
762 # Resolve the name from the initial changeset in the remote
763 # repository. This returns nullid when the remote is empty. It
763 # repository. This returns nullid when the remote is empty. It
764 # raises RepoLookupError if revision 0 is filtered or otherwise
764 # raises RepoLookupError if revision 0 is filtered or otherwise
765 # not available. If we fail to resolve, sharing is not enabled.
765 # not available. If we fail to resolve, sharing is not enabled.
766 try:
766 try:
767 with srcpeer.commandexecutor() as e:
767 with srcpeer.commandexecutor() as e:
768 rootnode = e.callcommand(
768 rootnode = e.callcommand(
769 b'lookup',
769 b'lookup',
770 {
770 {
771 b'key': b'0',
771 b'key': b'0',
772 },
772 },
773 ).result()
773 ).result()
774
774
775 if rootnode != sha1nodeconstants.nullid:
775 if rootnode != sha1nodeconstants.nullid:
776 sharepath = os.path.join(sharepool, hex(rootnode))
776 sharepath = os.path.join(sharepool, hex(rootnode))
777 else:
777 else:
778 ui.status(
778 ui.status(
779 _(
779 _(
780 b'(not using pooled storage: '
780 b'(not using pooled storage: '
781 b'remote appears to be empty)\n'
781 b'remote appears to be empty)\n'
782 )
782 )
783 )
783 )
784 except error.RepoLookupError:
784 except error.RepoLookupError:
785 ui.status(
785 ui.status(
786 _(
786 _(
787 b'(not using pooled storage: '
787 b'(not using pooled storage: '
788 b'unable to resolve identity of remote)\n'
788 b'unable to resolve identity of remote)\n'
789 )
789 )
790 )
790 )
791 elif sharenamemode == b'remote':
791 elif sharenamemode == b'remote':
792 sharepath = os.path.join(
792 sharepath = os.path.join(
793 sharepool, hex(hashutil.sha1(source).digest())
793 sharepool, hex(hashutil.sha1(source).digest())
794 )
794 )
795 else:
795 else:
796 raise error.Abort(
796 raise error.Abort(
797 _(b'unknown share naming mode: %s') % sharenamemode
797 _(b'unknown share naming mode: %s') % sharenamemode
798 )
798 )
799
799
800 # TODO this is a somewhat arbitrary restriction.
800 # TODO this is a somewhat arbitrary restriction.
801 if narrow:
801 if narrow:
802 ui.status(
802 ui.status(
803 _(b'(pooled storage not supported for narrow clones)\n')
803 _(b'(pooled storage not supported for narrow clones)\n')
804 )
804 )
805 sharepath = None
805 sharepath = None
806
806
807 if sharepath:
807 if sharepath:
808 return clonewithshare(
808 return clonewithshare(
809 ui,
809 ui,
810 peeropts,
810 peeropts,
811 sharepath,
811 sharepath,
812 source,
812 source,
813 srcpeer,
813 srcpeer,
814 dest,
814 dest,
815 pull=pull,
815 pull=pull,
816 rev=revs,
816 rev=revs,
817 update=update,
817 update=update,
818 stream=stream,
818 stream=stream,
819 )
819 )
820
820
821 srcrepo = srcpeer.local()
821 srcrepo = srcpeer.local()
822
822
823 abspath = origsource
823 abspath = origsource
824 if islocal(origsource):
824 if islocal(origsource):
825 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
825 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
826
826
827 if islocal(dest):
827 if islocal(dest):
828 cleandir = dest
828 if os.path.exists(dest):
829 # only clean up directories we create ourselves
830 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
831 cleandir = hgdir
832 else:
833 cleandir = dest
829
834
830 copy = False
835 copy = False
831 if (
836 if (
832 srcrepo
837 srcrepo
833 and srcrepo.cancopy()
838 and srcrepo.cancopy()
834 and islocal(dest)
839 and islocal(dest)
835 and not phases.hassecret(srcrepo)
840 and not phases.hassecret(srcrepo)
836 ):
841 ):
837 copy = not pull and not revs
842 copy = not pull and not revs
838
843
839 # TODO this is a somewhat arbitrary restriction.
844 # TODO this is a somewhat arbitrary restriction.
840 if narrow:
845 if narrow:
841 copy = False
846 copy = False
842
847
843 if copy:
848 if copy:
844 try:
849 try:
845 # we use a lock here because if we race with commit, we
850 # we use a lock here because if we race with commit, we
846 # can end up with extra data in the cloned revlogs that's
851 # can end up with extra data in the cloned revlogs that's
847 # not pointed to by changesets, thus causing verify to
852 # not pointed to by changesets, thus causing verify to
848 # fail
853 # fail
849 srclock = srcrepo.lock(wait=False)
854 srclock = srcrepo.lock(wait=False)
850 except error.LockError:
855 except error.LockError:
851 copy = False
856 copy = False
852
857
853 if copy:
858 if copy:
854 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
859 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
855 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
856 if not os.path.exists(dest):
861 if not os.path.exists(dest):
857 util.makedirs(dest)
862 util.makedirs(dest)
858 else:
859 # only clean up directories we create ourselves
860 cleandir = hgdir
861 try:
863 try:
862 destpath = hgdir
864 destpath = hgdir
863 util.makedir(destpath, notindexed=True)
865 util.makedir(destpath, notindexed=True)
864 except OSError as inst:
866 except OSError as inst:
865 if inst.errno == errno.EEXIST:
867 if inst.errno == errno.EEXIST:
866 cleandir = None
868 cleandir = None
867 raise error.Abort(
869 raise error.Abort(
868 _(b"destination '%s' already exists") % dest
870 _(b"destination '%s' already exists") % dest
869 )
871 )
870 raise
872 raise
871
873
872 destlock = copystore(ui, srcrepo, destpath)
874 destlock = copystore(ui, srcrepo, destpath)
873 # copy bookmarks over
875 # copy bookmarks over
874 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
876 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
875 dstbookmarks = os.path.join(destpath, b'bookmarks')
877 dstbookmarks = os.path.join(destpath, b'bookmarks')
876 if os.path.exists(srcbookmarks):
878 if os.path.exists(srcbookmarks):
877 util.copyfile(srcbookmarks, dstbookmarks)
879 util.copyfile(srcbookmarks, dstbookmarks)
878
880
879 dstcachedir = os.path.join(destpath, b'cache')
881 dstcachedir = os.path.join(destpath, b'cache')
880 for cache in cacheutil.cachetocopy(srcrepo):
882 for cache in cacheutil.cachetocopy(srcrepo):
881 _copycache(srcrepo, dstcachedir, cache)
883 _copycache(srcrepo, dstcachedir, cache)
882
884
883 # we need to re-init the repo after manually copying the data
885 # we need to re-init the repo after manually copying the data
884 # into it
886 # into it
885 destpeer = peer(srcrepo, peeropts, dest)
887 destpeer = peer(srcrepo, peeropts, dest)
886 srcrepo.hook(
888 srcrepo.hook(
887 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
889 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
888 )
890 )
889 else:
891 else:
890 try:
892 try:
891 # only pass ui when no srcrepo
893 # only pass ui when no srcrepo
892 destpeer = peer(
894 destpeer = peer(
893 srcrepo or ui,
895 srcrepo or ui,
894 peeropts,
896 peeropts,
895 dest,
897 dest,
896 create=True,
898 create=True,
897 createopts=createopts,
899 createopts=createopts,
898 )
900 )
899 except OSError as inst:
901 except OSError as inst:
900 if inst.errno == errno.EEXIST:
902 if inst.errno == errno.EEXIST:
901 cleandir = None
903 cleandir = None
902 raise error.Abort(
904 raise error.Abort(
903 _(b"destination '%s' already exists") % dest
905 _(b"destination '%s' already exists") % dest
904 )
906 )
905 raise
907 raise
906
908
907 if revs:
909 if revs:
908 if not srcpeer.capable(b'lookup'):
910 if not srcpeer.capable(b'lookup'):
909 raise error.Abort(
911 raise error.Abort(
910 _(
912 _(
911 b"src repository does not support "
913 b"src repository does not support "
912 b"revision lookup and so doesn't "
914 b"revision lookup and so doesn't "
913 b"support clone by revision"
915 b"support clone by revision"
914 )
916 )
915 )
917 )
916
918
917 # TODO this is batchable.
919 # TODO this is batchable.
918 remoterevs = []
920 remoterevs = []
919 for rev in revs:
921 for rev in revs:
920 with srcpeer.commandexecutor() as e:
922 with srcpeer.commandexecutor() as e:
921 remoterevs.append(
923 remoterevs.append(
922 e.callcommand(
924 e.callcommand(
923 b'lookup',
925 b'lookup',
924 {
926 {
925 b'key': rev,
927 b'key': rev,
926 },
928 },
927 ).result()
929 ).result()
928 )
930 )
929 revs = remoterevs
931 revs = remoterevs
930
932
931 checkout = revs[0]
933 checkout = revs[0]
932 else:
934 else:
933 revs = None
935 revs = None
934 local = destpeer.local()
936 local = destpeer.local()
935 if local:
937 if local:
936 if narrow:
938 if narrow:
937 with local.wlock(), local.lock():
939 with local.wlock(), local.lock():
938 local.setnarrowpats(storeincludepats, storeexcludepats)
940 local.setnarrowpats(storeincludepats, storeexcludepats)
939 narrowspec.copytoworkingcopy(local)
941 narrowspec.copytoworkingcopy(local)
940
942
941 u = urlutil.url(abspath)
943 u = urlutil.url(abspath)
942 defaulturl = bytes(u)
944 defaulturl = bytes(u)
943 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
945 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
944 if not stream:
946 if not stream:
945 if pull:
947 if pull:
946 stream = False
948 stream = False
947 else:
949 else:
948 stream = None
950 stream = None
949 # internal config: ui.quietbookmarkmove
951 # internal config: ui.quietbookmarkmove
950 overrides = {(b'ui', b'quietbookmarkmove'): True}
952 overrides = {(b'ui', b'quietbookmarkmove'): True}
951 with local.ui.configoverride(overrides, b'clone'):
953 with local.ui.configoverride(overrides, b'clone'):
952 exchange.pull(
954 exchange.pull(
953 local,
955 local,
954 srcpeer,
956 srcpeer,
955 revs,
957 revs,
956 streamclonerequested=stream,
958 streamclonerequested=stream,
957 includepats=storeincludepats,
959 includepats=storeincludepats,
958 excludepats=storeexcludepats,
960 excludepats=storeexcludepats,
959 depth=depth,
961 depth=depth,
960 )
962 )
961 elif srcrepo:
963 elif srcrepo:
962 # TODO lift restriction once exchange.push() accepts narrow
964 # TODO lift restriction once exchange.push() accepts narrow
963 # push.
965 # push.
964 if narrow:
966 if narrow:
965 raise error.Abort(
967 raise error.Abort(
966 _(
968 _(
967 b'narrow clone not available for '
969 b'narrow clone not available for '
968 b'remote destinations'
970 b'remote destinations'
969 )
971 )
970 )
972 )
971
973
972 exchange.push(
974 exchange.push(
973 srcrepo,
975 srcrepo,
974 destpeer,
976 destpeer,
975 revs=revs,
977 revs=revs,
976 bookmarks=srcrepo._bookmarks.keys(),
978 bookmarks=srcrepo._bookmarks.keys(),
977 )
979 )
978 else:
980 else:
979 raise error.Abort(
981 raise error.Abort(
980 _(b"clone from remote to remote not supported")
982 _(b"clone from remote to remote not supported")
981 )
983 )
982
984
983 cleandir = None
985 cleandir = None
984
986
985 destrepo = destpeer.local()
987 destrepo = destpeer.local()
986 if destrepo:
988 if destrepo:
987 template = uimod.samplehgrcs[b'cloned']
989 template = uimod.samplehgrcs[b'cloned']
988 u = urlutil.url(abspath)
990 u = urlutil.url(abspath)
989 u.passwd = None
991 u.passwd = None
990 defaulturl = bytes(u)
992 defaulturl = bytes(u)
991 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
993 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
992 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
994 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
993
995
994 if ui.configbool(b'experimental', b'remotenames'):
996 if ui.configbool(b'experimental', b'remotenames'):
995 logexchange.pullremotenames(destrepo, srcpeer)
997 logexchange.pullremotenames(destrepo, srcpeer)
996
998
997 if update:
999 if update:
998 if update is not True:
1000 if update is not True:
999 with srcpeer.commandexecutor() as e:
1001 with srcpeer.commandexecutor() as e:
1000 checkout = e.callcommand(
1002 checkout = e.callcommand(
1001 b'lookup',
1003 b'lookup',
1002 {
1004 {
1003 b'key': update,
1005 b'key': update,
1004 },
1006 },
1005 ).result()
1007 ).result()
1006
1008
1007 uprev = None
1009 uprev = None
1008 status = None
1010 status = None
1009 if checkout is not None:
1011 if checkout is not None:
1010 # Some extensions (at least hg-git and hg-subversion) have
1012 # Some extensions (at least hg-git and hg-subversion) have
1011 # a peer.lookup() implementation that returns a name instead
1013 # a peer.lookup() implementation that returns a name instead
1012 # of a nodeid. We work around it here until we've figured
1014 # of a nodeid. We work around it here until we've figured
1013 # out a better solution.
1015 # out a better solution.
1014 if len(checkout) == 20 and checkout in destrepo:
1016 if len(checkout) == 20 and checkout in destrepo:
1015 uprev = checkout
1017 uprev = checkout
1016 elif scmutil.isrevsymbol(destrepo, checkout):
1018 elif scmutil.isrevsymbol(destrepo, checkout):
1017 uprev = scmutil.revsymbol(destrepo, checkout).node()
1019 uprev = scmutil.revsymbol(destrepo, checkout).node()
1018 else:
1020 else:
1019 if update is not True:
1021 if update is not True:
1020 try:
1022 try:
1021 uprev = destrepo.lookup(update)
1023 uprev = destrepo.lookup(update)
1022 except error.RepoLookupError:
1024 except error.RepoLookupError:
1023 pass
1025 pass
1024 if uprev is None:
1026 if uprev is None:
1025 try:
1027 try:
1026 if destrepo._activebookmark:
1028 if destrepo._activebookmark:
1027 uprev = destrepo.lookup(destrepo._activebookmark)
1029 uprev = destrepo.lookup(destrepo._activebookmark)
1028 update = destrepo._activebookmark
1030 update = destrepo._activebookmark
1029 else:
1031 else:
1030 uprev = destrepo._bookmarks[b'@']
1032 uprev = destrepo._bookmarks[b'@']
1031 update = b'@'
1033 update = b'@'
1032 bn = destrepo[uprev].branch()
1034 bn = destrepo[uprev].branch()
1033 if bn == b'default':
1035 if bn == b'default':
1034 status = _(b"updating to bookmark %s\n" % update)
1036 status = _(b"updating to bookmark %s\n" % update)
1035 else:
1037 else:
1036 status = (
1038 status = (
1037 _(b"updating to bookmark %s on branch %s\n")
1039 _(b"updating to bookmark %s on branch %s\n")
1038 ) % (update, bn)
1040 ) % (update, bn)
1039 except KeyError:
1041 except KeyError:
1040 try:
1042 try:
1041 uprev = destrepo.branchtip(b'default')
1043 uprev = destrepo.branchtip(b'default')
1042 except error.RepoLookupError:
1044 except error.RepoLookupError:
1043 uprev = destrepo.lookup(b'tip')
1045 uprev = destrepo.lookup(b'tip')
1044 if not status:
1046 if not status:
1045 bn = destrepo[uprev].branch()
1047 bn = destrepo[uprev].branch()
1046 status = _(b"updating to branch %s\n") % bn
1048 status = _(b"updating to branch %s\n") % bn
1047 destrepo.ui.status(status)
1049 destrepo.ui.status(status)
1048 _update(destrepo, uprev)
1050 _update(destrepo, uprev)
1049 if update in destrepo._bookmarks:
1051 if update in destrepo._bookmarks:
1050 bookmarks.activate(destrepo, update)
1052 bookmarks.activate(destrepo, update)
1051 if destlock is not None:
1053 if destlock is not None:
1052 release(destlock)
1054 release(destlock)
1053 # here is a tiny windows were someone could end up writing the
1055 # here is a tiny windows were someone could end up writing the
1054 # repository before the cache are sure to be warm. This is "fine"
1056 # repository before the cache are sure to be warm. This is "fine"
1055 # as the only "bad" outcome would be some slowness. That potential
1057 # as the only "bad" outcome would be some slowness. That potential
1056 # slowness already affect reader.
1058 # slowness already affect reader.
1057 with destrepo.lock():
1059 with destrepo.lock():
1058 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1060 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1059 finally:
1061 finally:
1060 release(srclock, destlock)
1062 release(srclock, destlock)
1061 if cleandir is not None:
1063 if cleandir is not None:
1062 shutil.rmtree(cleandir, True)
1064 shutil.rmtree(cleandir, True)
1063 if srcpeer is not None:
1065 if srcpeer is not None:
1064 srcpeer.close()
1066 srcpeer.close()
1065 if destpeer and destpeer.local() is None:
1067 if destpeer and destpeer.local() is None:
1066 destpeer.close()
1068 destpeer.close()
1067 return srcpeer, destpeer
1069 return srcpeer, destpeer
1068
1070
1069
1071
1070 def _showstats(repo, stats, quietempty=False):
1072 def _showstats(repo, stats, quietempty=False):
1071 if quietempty and stats.isempty():
1073 if quietempty and stats.isempty():
1072 return
1074 return
1073 repo.ui.status(
1075 repo.ui.status(
1074 _(
1076 _(
1075 b"%d files updated, %d files merged, "
1077 b"%d files updated, %d files merged, "
1076 b"%d files removed, %d files unresolved\n"
1078 b"%d files removed, %d files unresolved\n"
1077 )
1079 )
1078 % (
1080 % (
1079 stats.updatedcount,
1081 stats.updatedcount,
1080 stats.mergedcount,
1082 stats.mergedcount,
1081 stats.removedcount,
1083 stats.removedcount,
1082 stats.unresolvedcount,
1084 stats.unresolvedcount,
1083 )
1085 )
1084 )
1086 )
1085
1087
1086
1088
1087 def updaterepo(repo, node, overwrite, updatecheck=None):
1089 def updaterepo(repo, node, overwrite, updatecheck=None):
1088 """Update the working directory to node.
1090 """Update the working directory to node.
1089
1091
1090 When overwrite is set, changes are clobbered, merged else
1092 When overwrite is set, changes are clobbered, merged else
1091
1093
1092 returns stats (see pydoc mercurial.merge.applyupdates)"""
1094 returns stats (see pydoc mercurial.merge.applyupdates)"""
1093 repo.ui.deprecwarn(
1095 repo.ui.deprecwarn(
1094 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1096 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1095 b'5.7',
1097 b'5.7',
1096 )
1098 )
1097 return mergemod._update(
1099 return mergemod._update(
1098 repo,
1100 repo,
1099 node,
1101 node,
1100 branchmerge=False,
1102 branchmerge=False,
1101 force=overwrite,
1103 force=overwrite,
1102 labels=[b'working copy', b'destination'],
1104 labels=[b'working copy', b'destination'],
1103 updatecheck=updatecheck,
1105 updatecheck=updatecheck,
1104 )
1106 )
1105
1107
1106
1108
1107 def update(repo, node, quietempty=False, updatecheck=None):
1109 def update(repo, node, quietempty=False, updatecheck=None):
1108 """update the working directory to node"""
1110 """update the working directory to node"""
1109 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1111 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1110 _showstats(repo, stats, quietempty)
1112 _showstats(repo, stats, quietempty)
1111 if stats.unresolvedcount:
1113 if stats.unresolvedcount:
1112 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1114 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1113 return stats.unresolvedcount > 0
1115 return stats.unresolvedcount > 0
1114
1116
1115
1117
1116 # naming conflict in clone()
1118 # naming conflict in clone()
1117 _update = update
1119 _update = update
1118
1120
1119
1121
1120 def clean(repo, node, show_stats=True, quietempty=False):
1122 def clean(repo, node, show_stats=True, quietempty=False):
1121 """forcibly switch the working directory to node, clobbering changes"""
1123 """forcibly switch the working directory to node, clobbering changes"""
1122 stats = mergemod.clean_update(repo[node])
1124 stats = mergemod.clean_update(repo[node])
1123 assert stats.unresolvedcount == 0
1125 assert stats.unresolvedcount == 0
1124 if show_stats:
1126 if show_stats:
1125 _showstats(repo, stats, quietempty)
1127 _showstats(repo, stats, quietempty)
1126 return False
1128 return False
1127
1129
1128
1130
1129 # naming conflict in updatetotally()
1131 # naming conflict in updatetotally()
1130 _clean = clean
1132 _clean = clean
1131
1133
1132 _VALID_UPDATECHECKS = {
1134 _VALID_UPDATECHECKS = {
1133 mergemod.UPDATECHECK_ABORT,
1135 mergemod.UPDATECHECK_ABORT,
1134 mergemod.UPDATECHECK_NONE,
1136 mergemod.UPDATECHECK_NONE,
1135 mergemod.UPDATECHECK_LINEAR,
1137 mergemod.UPDATECHECK_LINEAR,
1136 mergemod.UPDATECHECK_NO_CONFLICT,
1138 mergemod.UPDATECHECK_NO_CONFLICT,
1137 }
1139 }
1138
1140
1139
1141
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (non-config) argument was invalid: programming error
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly a bookmark
            # to move) from the repository state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then let the
                # merge machinery run without further dirtiness checks
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1218
1220
1219
1221
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts.

    :ctx: the changectx to merge into the working directory
    :force: allow merging with outstanding changes
    :remind: print the "don't forget to commit" reminder on success
    :labels: optional conflict-marker labels passed to the merge machinery
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1241
1243
1242
1244
def abortmerge(ui, repo):
    """abort an in-progress merge, updating back to the first parent"""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot produce conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1256
1258
1257
1259
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # an absolute subpath replaces the source entirely; a relative one
        # is joined onto the source URL's path
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace both the peer and the cleanup callback
        # (e.g. when it builds a temporary bundle repository)
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1314
1316
1315
1317
def incoming(ui, repo, source, opts, subpath=None):
    """show changesets in *source* not found in the local repository

    Thin wrapper around _incoming() that supplies the subrepo recursion
    and changeset-display callbacks.
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; exit status is
        # 0 if any repo (top-level or sub) had incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1345
1347
1346
1348
def _outgoing(ui, repo, dests, opts, subpath=None):
    """compute the set of outgoing nodes against every destination

    Returns a pair (outgoing_revs, others) where outgoing_revs is the list
    of missing nodes sorted by local revision number, and others is the
    list of opened peers (callers are responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # absolute subpath replaces the destination; relative subpath is
            # joined onto its URL path
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # close the peer we won't be handing back to the caller
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1385
1387
1386
1388
def _outgoing_recurse(ui, repo, dests, opts):
    """recurse into subrepos for `hg outgoing` when --subrepos is set

    Returns 0 if any subrepo reported outgoing changes, 1 otherwise.
    """
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1395
1397
1396
1398
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator over the nodes in *revs*, honoring --newest-first,
    --limit and --no-merges. NOTE: reverses *revs* in place when
    --newest-first is set.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: fast path, yield everything as-is
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n
1418
1420
1419
1421
def outgoing(ui, repo, dests, opts, subpath=None):
    """show changesets not found in the destination(s)

    Supports --graph output, outgoing hooks, and subrepo recursion.
    Returns 0 when outgoing changes were found, 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() handed us the open peers; close them all
        for oth in others:
            oth.close()
1451
1453
1452
1454
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks that .hgsubstate references in non-hidden changesets point
    at verifiable subrepositories. Returns the (possibly updated) error
    indicator from the verify machinery.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a broken subrepo is reported but does not stop the
                        # check of the remaining revisions
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1485
1487
1486
1488
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts

    :src: either a repository (its baseui is copied) or a ui object
    :opts: command options; ssh/remotecmd entries override config values

    Returns a new ui object carrying only the settings that are relevant
    for talking to a remote repository.
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1515
1517
1516
1518
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding a directory, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1526
1528
1527
1529
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) fingerprinting the files of interest.

        state is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list; maxmtime is the newest of those mtimes.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet; fall back to the directory itself
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the fingerprint so the copy does not immediately refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now