##// END OF EJS Templates
subrepo: make -S work again on Windows for incoming/outgoing to remote repos...
Matt Harbison -
r49400:f98d4d0a stable
parent child Browse files
Show More
@@ -1,1608 +1,1617 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import posixpath
13 import shutil
14 import shutil
14 import stat
15 import stat
15 import weakref
16 import weakref
16
17
17 from .i18n import _
18 from .i18n import _
18 from .node import (
19 from .node import (
19 hex,
20 hex,
20 sha1nodeconstants,
21 sha1nodeconstants,
21 short,
22 short,
22 )
23 )
23 from .pycompat import getattr
24 from .pycompat import getattr
24
25
25 from . import (
26 from . import (
26 bookmarks,
27 bookmarks,
27 bundlerepo,
28 bundlerepo,
28 cmdutil,
29 cmdutil,
29 destutil,
30 destutil,
30 discovery,
31 discovery,
31 error,
32 error,
32 exchange,
33 exchange,
33 extensions,
34 extensions,
34 graphmod,
35 graphmod,
35 httppeer,
36 httppeer,
36 localrepo,
37 localrepo,
37 lock,
38 lock,
38 logcmdutil,
39 logcmdutil,
39 logexchange,
40 logexchange,
40 merge as mergemod,
41 merge as mergemod,
41 mergestate as mergestatemod,
42 mergestate as mergestatemod,
42 narrowspec,
43 narrowspec,
43 phases,
44 phases,
44 requirements,
45 requirements,
45 scmutil,
46 scmutil,
46 sshpeer,
47 sshpeer,
47 statichttprepo,
48 statichttprepo,
48 ui as uimod,
49 ui as uimod,
49 unionrepo,
50 unionrepo,
50 url,
51 url,
51 util,
52 util,
52 verify as verifymod,
53 verify as verifymod,
53 vfs as vfsmod,
54 vfs as vfsmod,
54 )
55 )
55 from .interfaces import repository as repositorymod
56 from .interfaces import repository as repositorymod
56 from .utils import (
57 from .utils import (
57 hashutil,
58 hashutil,
58 stringutil,
59 stringutil,
59 urlutil,
60 urlutil,
60 )
61 )
61
62
62
63
# convenience alias used by the lock-releasing code paths below
release = lock.release

# shared features: names a share source may expose to its consumers
sharedbookmarks = b'bookmarks'
67
68
68
69
def _local(path):
    """Return the repository module handling a local ``path``.

    A path naming a regular file is treated as a bundle (``bundlerepo``);
    anything else (a directory or a nonexistent path) is handled by
    ``localrepo``.

    Raises ``error.Abort`` if ``path`` is syntactically invalid.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # nonexistent path: let localrepo decide what to do with it
        isfile = False

    # A conditional expression replaces the historical
    # `isfile and bundlerepo or localrepo` trick, which only worked
    # because module objects are always truthy.
    return bundlerepo if isfile else localrepo
88
89
89
90
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a remote's branchmap.

    ``branches`` is a ``(hashbranch, branchnames)`` pair as produced by URL
    parsing.  Returns ``(revs, checkoutrev)`` where ``revs`` has the resolved
    branch heads appended and ``checkoutrev`` is the suggested revision to
    check out (or ``None``).
    """
    remote = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branchnames = branches

    if not hashbranch and not branchnames:
        # nothing to resolve: pass the caller's revs straight through
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not remote.capable(b'branchmap'):
        if branchnames:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with remote.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def resolvebranch(branch):
        # b'.' refers to the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branchnames:
        if not resolvebranch(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # a fragment that is not a known branch is assumed to be a revision hash
    if hashbranch and not resolvebranch(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
132
133
133
134
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated since 6.0: use ``mercurial.utils.urlutil.parseurl`` instead.
    This shim only emits a deprecation warning and forwards the call.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
139
140
140
141
# Map of URL scheme -> handler (a module or a factory callable) used by
# _peerlookup() to pick the repository/peer implementation for a path.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
151
151
152
def _peerlookup(path):
    """Return the scheme handler responsible for ``path``.

    Falls back to the ``file`` handler for unknown schemes.  A handler may
    itself be callable (a factory taking the path) or a module exposing an
    ``instance`` attribute.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
164
165
165
166
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as remote
        return False
174
175
175
176
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
183
184
184
185
# a list of (ui, repo) functions called for wire peer initialization;
# extensions append to this and _peerorrepo() runs them on non-local peers
wirepeersetupfuncs = []
187
188
188
189
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs ``presetupfuncs``, then every loaded extension's ``reposetup``
    hook, and finally ``wirepeersetupfuncs`` for non-local peers.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the object's own ui if it has one (it may carry extra config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    # timedcm records wall-clock duration for the blame/perf logs below
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # wire peers get their own setup pass (extensions register callbacks)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215
216
216
217
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts if ``path`` names a remote peer rather than a local repository.
    The returned repository is filtered to the ``visible`` view.
    """
    setupopts = dict(
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    peer = _peerorrepo(ui, path, create, **setupopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    # hide filtered (e.g. obsolete/secret-hidden) changesets from callers
    return repo.filtered(b'visible')
240
241
241
242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248
249
249
250
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # the last path component (after normalization) names the clone dir;
    # an empty path yields an empty destination
    return os.path.basename(os.path.normpath(path)) if path else b''
270
271
271
272
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: path and sharedpath coincide
        return None

    # reuse a previously resolved source repository if one was cached
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache on the repo object for subsequent calls
    repo.srcrepo = srcrepo
    return srcrepo
289
290
290
291
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path/URL (bytes) or an existing repo/peer object.
    Creates ``dest`` sharing the store of ``source``, optionally updates
    the working directory, and returns the new repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        # normalize/expand the user-supplied destination
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # path/URL form: open the source and resolve any #branch fragment
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the repo picks up the configuration written by postshare()
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
338
338
339
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    # preserve whatever config the destination already has; it is appended
    # after the copied source config so local settings keep precedence
    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
359
360
360
361
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer file around as *.old rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407
408
408
409
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default path
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
426
426
427
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision overrides any checkout hint
        checkout = update
    # first of: requested checkout, the 'default' branch, then 'tip'
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
447
448
448
449
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # NOTE(review): hardlink is always None at this point, so the topic
        # shown first is always b'copying'; util.copyfiles() presumably
        # decides later whether hardlinking is possible and updates
        # `hardlink` for the final debug message — confirm against util.
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos don't need phase data copied over
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # don't leak the destination lock on failure
        release(destlock)
        raise
487
488
488
489
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        # a concurrent client may have created it already
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
596
596
597
597 # Recomputing caches is often slow on big repos, so copy them.
598 # Recomputing caches is often slow on big repos, so copy them.
598 def _copycache(srcrepo, dstcachedir, fname):
599 def _copycache(srcrepo, dstcachedir, fname):
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 srcfname = srcrepo.cachevfs.join(fname)
601 srcfname = srcrepo.cachevfs.join(fname)
601 dstfname = os.path.join(dstcachedir, fname)
602 dstfname = os.path.join(dstcachedir, fname)
602 if os.path.exists(srcfname):
603 if os.path.exists(srcfname):
603 if not os.path.exists(dstcachedir):
604 if not os.path.exists(dstcachedir):
604 os.mkdir(dstcachedir)
605 os.mkdir(dstcachedir)
605 util.copyfile(srcfname, dstfname)
606 util.copyfile(srcfname, dstfname)
606
607
607
608
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # i18n fix: apply % outside _() so the static
                            # format string is what gets looked up for
                            # translation (matches the branch below).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # BUG FIX: this previously called release(destlock) a second
                # time, so the working-copy lock was never released here and
                # stayed held until the ``finally`` block below.
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1075
1076
1076
1077
def _showstats(repo, stats, quietempty=False):
    """Report update statistics on the repo's ui.

    With ``quietempty`` set, an all-empty result prints nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1092
1093
1093
1094
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated thin shim over merge._update(); kept for out-of-tree callers.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1112
1113
1113
1114
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when conflicts remain, mirroring a shell-style failure flag.
    return unresolved > 0
1121
1122
1122
1123
# naming conflict in clone()
# (clone() takes an ``update`` argument, so the module-level update()
# function is aliased here for use inside that scope)
_update = update
1125
1126
1126
1127
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    target = repo[node]
    stats = mergemod.clean_update(target)
    # A forced (clean) update can never leave unresolved conflicts behind.
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1134
1135
1135
1136
# naming conflict in updatetotally()
# (updatetotally() takes a ``clean`` argument, so the module-level clean()
# function is aliased here for use inside that scope)
_clean = clean
1138
1139
# The accepted values for an ``updatecheck`` argument (UPDATECHECK_*
# constants defined in the merge module).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1145
1146
1146
1147
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # An explicit argument is checked strictly: an unknown constant from
        # a caller is a programming error, not a config problem.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: let destutil pick one, along with a
            # bookmark to (possibly) move and/or activate.
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # Abort early on a dirty working directory, then let the
                # actual update run unchecked.
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # The requested name is not a bookmark: deactivate any active one.
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # We chose the destination ourselves; tell the user about other
            # candidate destinations.
            destutil.statusotherdests(ui, repo)

    return ret
1225
1226
1226
1227
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1248
1249
1249
1250
def abortmerge(ui, repo):
    """Abort an in-progress merge and reset the working directory.

    Performs a clean update back to the local side of the merge (or to
    '.' when no merge state was stored), discarding the merge result.
    """
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # A clean update discards everything, so no conflicts can remain.
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1263
1264
1264
1265
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the value
    returned by subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # When recursing into a subrepository, graft the subrepo path onto
        # the parent's source URL (unless the subrepo path is absolute).
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                # A remote URL is always '/'-separated; os.path.normpath
                # would rewrite it with '\\' on Windows and break it.
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # By default only the peer must be closed; getremotechanges() below may
    # replace this with a more thorough cleanup callback.
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1322
1327
1323
1328
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally.

    Recurses into subrepositories when requested via opts.
    """

    def subreporecurse():
        # Best (lowest) exit status across all subrepositories.
        status = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_name in sorted(wctx.substate):
                sub = wctx.sub(sub_name)
                status = min(status, sub.incoming(ui, source, opts))
        return status

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1353
1358
1354
1359
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from the push destination(s).

    Returns a ``(outgoing_revs, others)`` pair: the union of missing nodes
    across all destinations, sorted by local revision number, and the list
    of opened peers, which the caller is responsible for closing.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # When recursing into a subrepository, graft the subrepo path
            # onto the parent's destination URL (unless it is absolute).
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    # A remote URL is always '/'-separated; os.path.normpath
                    # would rewrite it with '\\' on Windows and break it.
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # Only close the peer on failure; on success the caller uses
            # (and eventually closes) it via 'others'.
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1393
1402
1394
1403
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo; return the best (lowest) exit code."""
    best = 1
    if not opts.get(b'subrepos'):
        return best
    wctx = repo[None]
    for sub_name in sorted(wctx.substate):
        sub = wctx.sub(sub_name)
        best = min(best, sub.outgoing(ui, dests, opts))
    return best
1403
1412
1404
1413
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # Fast path: no filtering requested, stream everything through.
        for node in revs:
            yield node
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1426
1435
1427
1436
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changesets exist (here, or in a subrepo when
    recursing), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # Graph mode buffers the changeset output and renders an
                # ASCII DAG alongside it.
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() leaves its peers open; close them all here.
        for oth in others:
            oth.close()
1459
1468
1460
1469
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks that .hgsubstate references resolvable subrepositories in
    the revisions that touch it.  Returns verifymod.verify()'s result,
    combined (via 'or') with each subrepo's verify() result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # A failure to verify one subrepo should not stop the
                        # check of the remaining ones.
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # ctx.substate parsing itself blew up: the file is corrupt.
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1493
1502
1494
1503
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    # 'src' may be a repository (take its baseui, which is free of
    # repo-level settings) or a plain ui object (copy it wholesale).
    if util.safehasattr(src, b'baseui'):
        dst = src.baseui.copy()
        src = src.ui
    else:
        dst = src.copy()

    # Propagate ssh command settings, preferring command-line options
    # over configuration.
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # Carry over the main repository root used by bundle handling.
    mainroot = src.config(b'bundle', b'mainreporoot')
    if mainroot:
        dst.setconfig(b'bundle', b'mainreporoot', mainroot, b'copied')

    # Copy authentication and transport-security sections verbatim.
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1523
1532
1524
1533
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute naming the containing directory, file name);
# consumed by cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1534
1543
1535
1544
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # Snapshot of the files-of-interest stat data used to detect change.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # Re-apply the same view (filter) the cached instance had.
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) for the files of interest.

        ``state`` is a tuple of per-file (mtime, size) pairs for the entries
        in the module-level ``foi`` list; ``maxmtime`` is the newest mtime
        seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # The file may not exist; fall back to the directory's stat.
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # Share the stat snapshot so the copy refreshes on the same schedule.
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now