##// END OF EJS Templates
incoming: use bytes for an error message...
Matt Harbison -
r47768:27602e03 stable
parent child Browse files
Show More
@@ -1,1596 +1,1596 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 graphmod,
35 graphmod,
36 httppeer,
36 httppeer,
37 localrepo,
37 localrepo,
38 lock,
38 lock,
39 logcmdutil,
39 logcmdutil,
40 logexchange,
40 logexchange,
41 merge as mergemod,
41 merge as mergemod,
42 mergestate as mergestatemod,
42 mergestate as mergestatemod,
43 narrowspec,
43 narrowspec,
44 phases,
44 phases,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
63 release = lock.release
63 release = lock.release
64
64
65 # shared features
65 # shared features
66 sharedbookmarks = b'bookmarks'
66 sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repository module appropriate for a local ``path``.

    Returns ``bundlerepo`` when ``path`` names a regular file (a bundle)
    and ``localrepo`` otherwise, including when the path does not exist.

    Raises error.Abort when os.stat() rejects the path itself with
    TypeError/ValueError (e.g. an embedded NUL byte).
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # missing/unreadable path: treat as "not a bundle file"
        isfile = False

    # conditional expression instead of the legacy `and ... or ...` idiom
    return bundlerepo if isfile else localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a url fragment against a peer's branchmap.

    ``branches`` is a ``(hashbranch, branches)`` pair (as produced by
    url parsing).  Returns ``(revs, checkout)`` where ``revs`` is the
    possibly-extended revision list and ``checkout`` is its first entry
    (or None when nothing was requested).

    Raises error.Abort when branch lookup is needed but unsupported by
    the peer, and error.RepoLookupError for an unknown branch name.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested at all; pass through the caller's revs
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap we can only treat the fragment as a revision
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the branch's heads; True if the branch exists
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be a branch name or a raw revision identifier
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    """Deprecated: parse url#branch, returning (url, (branch, branches)).

    Kept as a shim for backward compatibility; warns once and forwards to
    urlutil.parseurl().
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# Map of URL scheme -> handler (module, or callable returning one) used to
# open a repository at that location.  b'file' doubles as the fallback for
# unknown/absent schemes (see _peerlookup); _local further distinguishes
# bundle files from local repositories.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the scheme handler for ``path``.

    The handler is either a module implementing ``instance()`` or the
    result of calling a callable entry (such as ``_local``) with the path.
    """
    parsed = urlutil.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
164
164
165
165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    # repo/peer objects answer for themselves
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as remote
        return False
174
174
175
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization
# (extensions append entries; _peerorrepo runs them for non-local peers)
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the object via the scheme handler, runs ``presetupfuncs``,
    every loaded extension's ``reposetup`` hook, and (for non-local peers)
    the registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instantiated object's own ui when it provides one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each extension's hook individually for the log
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get extra initialization registered by extensions
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a local repository object for ``path``, filtered to 'visible'.

    Aborts when ``path`` refers to a repository that is not local.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    localinstance = obj.local()
    if localinstance:
        return localinstance.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    obj = _peerorrepo(
        remoteui(uiorrepo, opts),
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    return obj.peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component after normalization; empty when there is no path
    return os.path.basename(os.path.normpath(path)) if path else b''
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    # reuse a previously resolved source repository when available
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, _branches = urlutil.parseurl(sourcepath)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """create a shared repository

    ``source`` may be a path (bytes) or an existing repo/peer object.
    ``update`` can be a boolean or a revision to check out (passed on to
    _postshareupdate).  Returns the newly created shared repository.
    """

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        # get_clone_path returns several forms; index 1 is the one we use
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the configuration written by postshare takes effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
337
338
338
def _prependsourcehgrc(repo):
    """Prepend the share source's .hg/hgrc to this repo's .hg/hgrc.

    Used on unshare. This is only done if the share was performed using the
    share-safe method, where the config of the source is shared with shares.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to carry over from the source
        return

    existingconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existingconfig)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for inspection/debugging
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the stale repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # point the new share's default path at the source's default
        destrepo.vfs.write(
            b'hgrc',
            util.tonativeeol(b'[paths]\ndefault = %s\n' % default),
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock (a lock on the destination store, or None), which the
    caller is responsible for releasing
    """
    destlock = None
    try:
        # hardlink starts as None; util.copyfiles decides whether linking is
        # possible and returns the updated flag — TODO confirm against
        # util.copyfiles
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos keep everything public; skip phase data
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # drop the destination lock before propagating the error
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    NOTE(review): the ``pull`` argument is never read in this body (the pool
    clone below always uses pull=True) — confirm it is kept for interface
    compatibility only.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        # node identifiers as resolved by the peer
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    # Nothing to do when the source repo never built this cache.
    if not os.path.exists(src):
        return
    # Lazily create the destination cache directory on first copy.
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
606
606
607
607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Translate the literal msgid first, then
                            # interpolate; interpolating inside _() defeats
                            # the gettext catalog lookup (the formatted bytes
                            # never match a catalog entry).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066
1066
1067
1067
def _showstats(repo, stats, quietempty=False):
    """Write a one-line summary of merge/update *stats* to the ui.

    When ``quietempty`` is true and the stats report no activity,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    summary = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(summary % counts)
1083
1083
1084
1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point; warn callers toward the merge module.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # Delegate to the real implementation, never doing a branch merge
    # from here; ``force`` maps directly onto ``overwrite``.
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1103
1103
1104
1104
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True signals remaining conflicts to the caller.
    return unresolved > 0


# naming conflict in clone()
_update = update
1116
1116
1117
1117
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # A clean (discarding) update can never leave conflicts behind.
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # No conflicts by construction, so always report success.
    return False


# naming conflict in updatetotally()
_clean = clean

# The set of update-check policies accepted by updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1136
1136
1137
1137
1138 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1138 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1139 """Update the working directory with extra care for non-file components
1139 """Update the working directory with extra care for non-file components
1140
1140
1141 This takes care of non-file components below:
1141 This takes care of non-file components below:
1142
1142
1143 :bookmark: might be advanced or (in)activated
1143 :bookmark: might be advanced or (in)activated
1144
1144
1145 This takes arguments below:
1145 This takes arguments below:
1146
1146
1147 :checkout: to which revision the working directory is updated
1147 :checkout: to which revision the working directory is updated
1148 :brev: a name, which might be a bookmark to be activated after updating
1148 :brev: a name, which might be a bookmark to be activated after updating
1149 :clean: whether changes in the working directory can be discarded
1149 :clean: whether changes in the working directory can be discarded
1150 :updatecheck: how to deal with a dirty working directory
1150 :updatecheck: how to deal with a dirty working directory
1151
1151
1152 Valid values for updatecheck are the UPDATECHECK_* constants
1152 Valid values for updatecheck are the UPDATECHECK_* constants
1153 defined in the merge module. Passing `None` will result in using the
1153 defined in the merge module. Passing `None` will result in using the
1154 configured default.
1154 configured default.
1155
1155
1156 * ABORT: abort if the working directory is dirty
1156 * ABORT: abort if the working directory is dirty
1157 * NONE: don't check (merge working directory changes into destination)
1157 * NONE: don't check (merge working directory changes into destination)
1158 * LINEAR: check that update is linear before merging working directory
1158 * LINEAR: check that update is linear before merging working directory
1159 changes into destination
1159 changes into destination
1160 * NO_CONFLICT: check that the update does not result in file merges
1160 * NO_CONFLICT: check that the update does not result in file merges
1161
1161
1162 This returns whether conflict is detected at updating or not.
1162 This returns whether conflict is detected at updating or not.
1163 """
1163 """
1164 if updatecheck is None:
1164 if updatecheck is None:
1165 updatecheck = ui.config(b'commands', b'update.check')
1165 updatecheck = ui.config(b'commands', b'update.check')
1166 if updatecheck not in _VALID_UPDATECHECKS:
1166 if updatecheck not in _VALID_UPDATECHECKS:
1167 # If not configured, or invalid value configured
1167 # If not configured, or invalid value configured
1168 updatecheck = mergemod.UPDATECHECK_LINEAR
1168 updatecheck = mergemod.UPDATECHECK_LINEAR
1169 if updatecheck not in _VALID_UPDATECHECKS:
1169 if updatecheck not in _VALID_UPDATECHECKS:
1170 raise ValueError(
1170 raise ValueError(
1171 r'Invalid updatecheck value %r (can accept %r)'
1171 r'Invalid updatecheck value %r (can accept %r)'
1172 % (updatecheck, _VALID_UPDATECHECKS)
1172 % (updatecheck, _VALID_UPDATECHECKS)
1173 )
1173 )
1174 with repo.wlock():
1174 with repo.wlock():
1175 movemarkfrom = None
1175 movemarkfrom = None
1176 warndest = False
1176 warndest = False
1177 if checkout is None:
1177 if checkout is None:
1178 updata = destutil.destupdate(repo, clean=clean)
1178 updata = destutil.destupdate(repo, clean=clean)
1179 checkout, movemarkfrom, brev = updata
1179 checkout, movemarkfrom, brev = updata
1180 warndest = True
1180 warndest = True
1181
1181
1182 if clean:
1182 if clean:
1183 ret = _clean(repo, checkout)
1183 ret = _clean(repo, checkout)
1184 else:
1184 else:
1185 if updatecheck == mergemod.UPDATECHECK_ABORT:
1185 if updatecheck == mergemod.UPDATECHECK_ABORT:
1186 cmdutil.bailifchanged(repo, merge=False)
1186 cmdutil.bailifchanged(repo, merge=False)
1187 updatecheck = mergemod.UPDATECHECK_NONE
1187 updatecheck = mergemod.UPDATECHECK_NONE
1188 ret = _update(repo, checkout, updatecheck=updatecheck)
1188 ret = _update(repo, checkout, updatecheck=updatecheck)
1189
1189
1190 if not ret and movemarkfrom:
1190 if not ret and movemarkfrom:
1191 if movemarkfrom == repo[b'.'].node():
1191 if movemarkfrom == repo[b'.'].node():
1192 pass # no-op update
1192 pass # no-op update
1193 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1193 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1194 b = ui.label(repo._activebookmark, b'bookmarks.active')
1194 b = ui.label(repo._activebookmark, b'bookmarks.active')
1195 ui.status(_(b"updating bookmark %s\n") % b)
1195 ui.status(_(b"updating bookmark %s\n") % b)
1196 else:
1196 else:
1197 # this can happen with a non-linear update
1197 # this can happen with a non-linear update
1198 b = ui.label(repo._activebookmark, b'bookmarks')
1198 b = ui.label(repo._activebookmark, b'bookmarks')
1199 ui.status(_(b"(leaving bookmark %s)\n") % b)
1199 ui.status(_(b"(leaving bookmark %s)\n") % b)
1200 bookmarks.deactivate(repo)
1200 bookmarks.deactivate(repo)
1201 elif brev in repo._bookmarks:
1201 elif brev in repo._bookmarks:
1202 if brev != repo._activebookmark:
1202 if brev != repo._activebookmark:
1203 b = ui.label(brev, b'bookmarks.active')
1203 b = ui.label(brev, b'bookmarks.active')
1204 ui.status(_(b"(activating bookmark %s)\n") % b)
1204 ui.status(_(b"(activating bookmark %s)\n") % b)
1205 bookmarks.activate(repo, brev)
1205 bookmarks.activate(repo, brev)
1206 elif brev:
1206 elif brev:
1207 if repo._activebookmark:
1207 if repo._activebookmark:
1208 b = ui.label(repo._activebookmark, b'bookmarks')
1208 b = ui.label(repo._activebookmark, b'bookmarks')
1209 ui.status(_(b"(leaving bookmark %s)\n") % b)
1209 ui.status(_(b"(leaving bookmark %s)\n") % b)
1210 bookmarks.deactivate(repo)
1210 bookmarks.deactivate(repo)
1211
1211
1212 if warndest:
1212 if warndest:
1213 destutil.statusotherdests(ui, repo)
1213 destutil.statusotherdests(ui, repo)
1214
1214
1215 return ret
1215 return ret
1216
1216
1217
1217
1218 def merge(
1218 def merge(
1219 ctx,
1219 ctx,
1220 force=False,
1220 force=False,
1221 remind=True,
1221 remind=True,
1222 labels=None,
1222 labels=None,
1223 ):
1223 ):
1224 """Branch merge with node, resolving changes. Return true if any
1224 """Branch merge with node, resolving changes. Return true if any
1225 unresolved conflicts."""
1225 unresolved conflicts."""
1226 repo = ctx.repo()
1226 repo = ctx.repo()
1227 stats = mergemod.merge(ctx, force=force, labels=labels)
1227 stats = mergemod.merge(ctx, force=force, labels=labels)
1228 _showstats(repo, stats)
1228 _showstats(repo, stats)
1229 if stats.unresolvedcount:
1229 if stats.unresolvedcount:
1230 repo.ui.status(
1230 repo.ui.status(
1231 _(
1231 _(
1232 b"use 'hg resolve' to retry unresolved file merges "
1232 b"use 'hg resolve' to retry unresolved file merges "
1233 b"or 'hg merge --abort' to abandon\n"
1233 b"or 'hg merge --abort' to abandon\n"
1234 )
1234 )
1235 )
1235 )
1236 elif remind:
1236 elif remind:
1237 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1237 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1238 return stats.unresolvedcount > 0
1238 return stats.unresolvedcount > 0
1239
1239
1240
1240
1241 def abortmerge(ui, repo):
1241 def abortmerge(ui, repo):
1242 ms = mergestatemod.mergestate.read(repo)
1242 ms = mergestatemod.mergestate.read(repo)
1243 if ms.active():
1243 if ms.active():
1244 # there were conflicts
1244 # there were conflicts
1245 node = ms.localctx.hex()
1245 node = ms.localctx.hex()
1246 else:
1246 else:
1247 # there were no conficts, mergestate was not stored
1247 # there were no conficts, mergestate was not stored
1248 node = repo[b'.'].hex()
1248 node = repo[b'.'].hex()
1249
1249
1250 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1250 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1251 stats = mergemod.clean_update(repo[node])
1251 stats = mergemod.clean_update(repo[node])
1252 assert stats.unresolvedcount == 0
1252 assert stats.unresolvedcount == 0
1253 _showstats(repo, stats)
1253 _showstats(repo, stats)
1254
1254
1255
1255
1256 def _incoming(
1256 def _incoming(
1257 displaychlist,
1257 displaychlist,
1258 subreporecurse,
1258 subreporecurse,
1259 ui,
1259 ui,
1260 repo,
1260 repo,
1261 source,
1261 source,
1262 opts,
1262 opts,
1263 buffered=False,
1263 buffered=False,
1264 subpath=None,
1264 subpath=None,
1265 ):
1265 ):
1266 """
1266 """
1267 Helper for incoming / gincoming.
1267 Helper for incoming / gincoming.
1268 displaychlist gets called with
1268 displaychlist gets called with
1269 (remoterepo, incomingchangesetlist, displayer) parameters,
1269 (remoterepo, incomingchangesetlist, displayer) parameters,
1270 and is supposed to contain only code that can't be unified.
1270 and is supposed to contain only code that can't be unified.
1271 """
1271 """
1272 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1272 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1273 srcs = list(srcs)
1273 srcs = list(srcs)
1274 if len(srcs) != 1:
1274 if len(srcs) != 1:
1275 msg = _('for now, incoming supports only a single source, %d provided')
1275 msg = _(b'for now, incoming supports only a single source, %d provided')
1276 msg %= len(srcs)
1276 msg %= len(srcs)
1277 raise error.Abort(msg)
1277 raise error.Abort(msg)
1278 source, branches = srcs[0]
1278 source, branches = srcs[0]
1279 if subpath is not None:
1279 if subpath is not None:
1280 subpath = urlutil.url(subpath)
1280 subpath = urlutil.url(subpath)
1281 if subpath.isabs():
1281 if subpath.isabs():
1282 source = bytes(subpath)
1282 source = bytes(subpath)
1283 else:
1283 else:
1284 p = urlutil.url(source)
1284 p = urlutil.url(source)
1285 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1285 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1286 source = bytes(p)
1286 source = bytes(p)
1287 other = peer(repo, opts, source)
1287 other = peer(repo, opts, source)
1288 cleanupfn = other.close
1288 cleanupfn = other.close
1289 try:
1289 try:
1290 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1290 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1291 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1291 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1292
1292
1293 if revs:
1293 if revs:
1294 revs = [other.lookup(rev) for rev in revs]
1294 revs = [other.lookup(rev) for rev in revs]
1295 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1295 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1296 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1296 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1297 )
1297 )
1298
1298
1299 if not chlist:
1299 if not chlist:
1300 ui.status(_(b"no changes found\n"))
1300 ui.status(_(b"no changes found\n"))
1301 return subreporecurse()
1301 return subreporecurse()
1302 ui.pager(b'incoming')
1302 ui.pager(b'incoming')
1303 displayer = logcmdutil.changesetdisplayer(
1303 displayer = logcmdutil.changesetdisplayer(
1304 ui, other, opts, buffered=buffered
1304 ui, other, opts, buffered=buffered
1305 )
1305 )
1306 displaychlist(other, chlist, displayer)
1306 displaychlist(other, chlist, displayer)
1307 displayer.close()
1307 displayer.close()
1308 finally:
1308 finally:
1309 cleanupfn()
1309 cleanupfn()
1310 subreporecurse()
1310 subreporecurse()
1311 return 0 # exit code is zero since we found incoming changes
1311 return 0 # exit code is zero since we found incoming changes
1312
1312
1313
1313
1314 def incoming(ui, repo, source, opts, subpath=None):
1314 def incoming(ui, repo, source, opts, subpath=None):
1315 def subreporecurse():
1315 def subreporecurse():
1316 ret = 1
1316 ret = 1
1317 if opts.get(b'subrepos'):
1317 if opts.get(b'subrepos'):
1318 ctx = repo[None]
1318 ctx = repo[None]
1319 for subpath in sorted(ctx.substate):
1319 for subpath in sorted(ctx.substate):
1320 sub = ctx.sub(subpath)
1320 sub = ctx.sub(subpath)
1321 ret = min(ret, sub.incoming(ui, source, opts))
1321 ret = min(ret, sub.incoming(ui, source, opts))
1322 return ret
1322 return ret
1323
1323
1324 def display(other, chlist, displayer):
1324 def display(other, chlist, displayer):
1325 limit = logcmdutil.getlimit(opts)
1325 limit = logcmdutil.getlimit(opts)
1326 if opts.get(b'newest_first'):
1326 if opts.get(b'newest_first'):
1327 chlist.reverse()
1327 chlist.reverse()
1328 count = 0
1328 count = 0
1329 for n in chlist:
1329 for n in chlist:
1330 if limit is not None and count >= limit:
1330 if limit is not None and count >= limit:
1331 break
1331 break
1332 parents = [p for p in other.changelog.parents(n) if p != nullid]
1332 parents = [p for p in other.changelog.parents(n) if p != nullid]
1333 if opts.get(b'no_merges') and len(parents) == 2:
1333 if opts.get(b'no_merges') and len(parents) == 2:
1334 continue
1334 continue
1335 count += 1
1335 count += 1
1336 displayer.show(other[n])
1336 displayer.show(other[n])
1337
1337
1338 return _incoming(
1338 return _incoming(
1339 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1339 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1340 )
1340 )
1341
1341
1342
1342
1343 def _outgoing(ui, repo, dests, opts, subpath=None):
1343 def _outgoing(ui, repo, dests, opts, subpath=None):
1344 out = set()
1344 out = set()
1345 others = []
1345 others = []
1346 for path in urlutil.get_push_paths(repo, ui, dests):
1346 for path in urlutil.get_push_paths(repo, ui, dests):
1347 dest = path.pushloc or path.loc
1347 dest = path.pushloc or path.loc
1348 if subpath is not None:
1348 if subpath is not None:
1349 subpath = urlutil.url(subpath)
1349 subpath = urlutil.url(subpath)
1350 if subpath.isabs():
1350 if subpath.isabs():
1351 dest = bytes(subpath)
1351 dest = bytes(subpath)
1352 else:
1352 else:
1353 p = urlutil.url(dest)
1353 p = urlutil.url(dest)
1354 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1354 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1355 dest = bytes(p)
1355 dest = bytes(p)
1356 branches = path.branch, opts.get(b'branch') or []
1356 branches = path.branch, opts.get(b'branch') or []
1357
1357
1358 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1358 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1359 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1359 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1360 if revs:
1360 if revs:
1361 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1361 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1362
1362
1363 other = peer(repo, opts, dest)
1363 other = peer(repo, opts, dest)
1364 try:
1364 try:
1365 outgoing = discovery.findcommonoutgoing(
1365 outgoing = discovery.findcommonoutgoing(
1366 repo, other, revs, force=opts.get(b'force')
1366 repo, other, revs, force=opts.get(b'force')
1367 )
1367 )
1368 o = outgoing.missing
1368 o = outgoing.missing
1369 out.update(o)
1369 out.update(o)
1370 if not o:
1370 if not o:
1371 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1371 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1372 others.append(other)
1372 others.append(other)
1373 except: # re-raises
1373 except: # re-raises
1374 other.close()
1374 other.close()
1375 raise
1375 raise
1376 # make sure this is ordered by revision number
1376 # make sure this is ordered by revision number
1377 outgoing_revs = list(out)
1377 outgoing_revs = list(out)
1378 cl = repo.changelog
1378 cl = repo.changelog
1379 outgoing_revs.sort(key=cl.rev)
1379 outgoing_revs.sort(key=cl.rev)
1380 return outgoing_revs, others
1380 return outgoing_revs, others
1381
1381
1382
1382
1383 def _outgoing_recurse(ui, repo, dests, opts):
1383 def _outgoing_recurse(ui, repo, dests, opts):
1384 ret = 1
1384 ret = 1
1385 if opts.get(b'subrepos'):
1385 if opts.get(b'subrepos'):
1386 ctx = repo[None]
1386 ctx = repo[None]
1387 for subpath in sorted(ctx.substate):
1387 for subpath in sorted(ctx.substate):
1388 sub = ctx.sub(subpath)
1388 sub = ctx.sub(subpath)
1389 ret = min(ret, sub.outgoing(ui, dests, opts))
1389 ret = min(ret, sub.outgoing(ui, dests, opts))
1390 return ret
1390 return ret
1391
1391
1392
1392
1393 def _outgoing_filter(repo, revs, opts):
1393 def _outgoing_filter(repo, revs, opts):
1394 """apply revision filtering/ordering option for outgoing"""
1394 """apply revision filtering/ordering option for outgoing"""
1395 limit = logcmdutil.getlimit(opts)
1395 limit = logcmdutil.getlimit(opts)
1396 no_merges = opts.get(b'no_merges')
1396 no_merges = opts.get(b'no_merges')
1397 if opts.get(b'newest_first'):
1397 if opts.get(b'newest_first'):
1398 revs.reverse()
1398 revs.reverse()
1399 if limit is None and not no_merges:
1399 if limit is None and not no_merges:
1400 for r in revs:
1400 for r in revs:
1401 yield r
1401 yield r
1402 return
1402 return
1403
1403
1404 count = 0
1404 count = 0
1405 cl = repo.changelog
1405 cl = repo.changelog
1406 for n in revs:
1406 for n in revs:
1407 if limit is not None and count >= limit:
1407 if limit is not None and count >= limit:
1408 break
1408 break
1409 parents = [p for p in cl.parents(n) if p != nullid]
1409 parents = [p for p in cl.parents(n) if p != nullid]
1410 if no_merges and len(parents) == 2:
1410 if no_merges and len(parents) == 2:
1411 continue
1411 continue
1412 count += 1
1412 count += 1
1413 yield n
1413 yield n
1414
1414
1415
1415
1416 def outgoing(ui, repo, dests, opts, subpath=None):
1416 def outgoing(ui, repo, dests, opts, subpath=None):
1417 if opts.get(b'graph'):
1417 if opts.get(b'graph'):
1418 logcmdutil.checkunsupportedgraphflags([], opts)
1418 logcmdutil.checkunsupportedgraphflags([], opts)
1419 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1419 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1420 ret = 1
1420 ret = 1
1421 try:
1421 try:
1422 if o:
1422 if o:
1423 ret = 0
1423 ret = 0
1424
1424
1425 if opts.get(b'graph'):
1425 if opts.get(b'graph'):
1426 revdag = logcmdutil.graphrevs(repo, o, opts)
1426 revdag = logcmdutil.graphrevs(repo, o, opts)
1427 ui.pager(b'outgoing')
1427 ui.pager(b'outgoing')
1428 displayer = logcmdutil.changesetdisplayer(
1428 displayer = logcmdutil.changesetdisplayer(
1429 ui, repo, opts, buffered=True
1429 ui, repo, opts, buffered=True
1430 )
1430 )
1431 logcmdutil.displaygraph(
1431 logcmdutil.displaygraph(
1432 ui, repo, revdag, displayer, graphmod.asciiedges
1432 ui, repo, revdag, displayer, graphmod.asciiedges
1433 )
1433 )
1434 else:
1434 else:
1435 ui.pager(b'outgoing')
1435 ui.pager(b'outgoing')
1436 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1436 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1437 for n in _outgoing_filter(repo, o, opts):
1437 for n in _outgoing_filter(repo, o, opts):
1438 displayer.show(repo[n])
1438 displayer.show(repo[n])
1439 displayer.close()
1439 displayer.close()
1440 for oth in others:
1440 for oth in others:
1441 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1441 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1442 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1442 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1443 return ret # exit code is zero since we found outgoing changes
1443 return ret # exit code is zero since we found outgoing changes
1444 finally:
1444 finally:
1445 for oth in others:
1445 for oth in others:
1446 oth.close()
1446 oth.close()
1447
1447
1448
1448
1449 def verify(repo, level=None):
1449 def verify(repo, level=None):
1450 """verify the consistency of a repository"""
1450 """verify the consistency of a repository"""
1451 ret = verifymod.verify(repo, level=level)
1451 ret = verifymod.verify(repo, level=level)
1452
1452
1453 # Broken subrepo references in hidden csets don't seem worth worrying about,
1453 # Broken subrepo references in hidden csets don't seem worth worrying about,
1454 # since they can't be pushed/pulled, and --hidden can be used if they are a
1454 # since they can't be pushed/pulled, and --hidden can be used if they are a
1455 # concern.
1455 # concern.
1456
1456
1457 # pathto() is needed for -R case
1457 # pathto() is needed for -R case
1458 revs = repo.revs(
1458 revs = repo.revs(
1459 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1459 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1460 )
1460 )
1461
1461
1462 if revs:
1462 if revs:
1463 repo.ui.status(_(b'checking subrepo links\n'))
1463 repo.ui.status(_(b'checking subrepo links\n'))
1464 for rev in revs:
1464 for rev in revs:
1465 ctx = repo[rev]
1465 ctx = repo[rev]
1466 try:
1466 try:
1467 for subpath in ctx.substate:
1467 for subpath in ctx.substate:
1468 try:
1468 try:
1469 ret = (
1469 ret = (
1470 ctx.sub(subpath, allowcreate=False).verify() or ret
1470 ctx.sub(subpath, allowcreate=False).verify() or ret
1471 )
1471 )
1472 except error.RepoError as e:
1472 except error.RepoError as e:
1473 repo.ui.warn(b'%d: %s\n' % (rev, e))
1473 repo.ui.warn(b'%d: %s\n' % (rev, e))
1474 except Exception:
1474 except Exception:
1475 repo.ui.warn(
1475 repo.ui.warn(
1476 _(b'.hgsubstate is corrupt in revision %s\n')
1476 _(b'.hgsubstate is corrupt in revision %s\n')
1477 % short(ctx.node())
1477 % short(ctx.node())
1478 )
1478 )
1479
1479
1480 return ret
1480 return ret
1481
1481
1482
1482
1483 def remoteui(src, opts):
1483 def remoteui(src, opts):
1484 """build a remote ui from ui or repo and opts"""
1484 """build a remote ui from ui or repo and opts"""
1485 if util.safehasattr(src, b'baseui'): # looks like a repository
1485 if util.safehasattr(src, b'baseui'): # looks like a repository
1486 dst = src.baseui.copy() # drop repo-specific config
1486 dst = src.baseui.copy() # drop repo-specific config
1487 src = src.ui # copy target options from repo
1487 src = src.ui # copy target options from repo
1488 else: # assume it's a global ui object
1488 else: # assume it's a global ui object
1489 dst = src.copy() # keep all global options
1489 dst = src.copy() # keep all global options
1490
1490
1491 # copy ssh-specific options
1491 # copy ssh-specific options
1492 for o in b'ssh', b'remotecmd':
1492 for o in b'ssh', b'remotecmd':
1493 v = opts.get(o) or src.config(b'ui', o)
1493 v = opts.get(o) or src.config(b'ui', o)
1494 if v:
1494 if v:
1495 dst.setconfig(b"ui", o, v, b'copied')
1495 dst.setconfig(b"ui", o, v, b'copied')
1496
1496
1497 # copy bundle-specific options
1497 # copy bundle-specific options
1498 r = src.config(b'bundle', b'mainreporoot')
1498 r = src.config(b'bundle', b'mainreporoot')
1499 if r:
1499 if r:
1500 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1500 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1501
1501
1502 # copy selected local settings to the remote ui
1502 # copy selected local settings to the remote ui
1503 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1503 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1504 for key, val in src.configitems(sect):
1504 for key, val in src.configitems(sect):
1505 dst.setconfig(sect, key, val, b'copied')
1505 dst.setconfig(sect, key, val, b'copied')
1506 v = src.config(b'web', b'cacerts')
1506 v = src.config(b'web', b'cacerts')
1507 if v:
1507 if v:
1508 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1508 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1509
1509
1510 return dst
1510 return dst
1511
1511
1512
1512
1513 # Files of interest
1513 # Files of interest
1514 # Used to check if the repository has changed looking at mtime and size of
1514 # Used to check if the repository has changed looking at mtime and size of
1515 # these files.
1515 # these files.
1516 foi = [
1516 foi = [
1517 (b'spath', b'00changelog.i'),
1517 (b'spath', b'00changelog.i'),
1518 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1518 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1519 (b'spath', b'obsstore'),
1519 (b'spath', b'obsstore'),
1520 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1520 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1521 ]
1521 ]
1522
1522
1523
1523
1524 class cachedlocalrepo(object):
1524 class cachedlocalrepo(object):
1525 """Holds a localrepository that can be cached and reused."""
1525 """Holds a localrepository that can be cached and reused."""
1526
1526
1527 def __init__(self, repo):
1527 def __init__(self, repo):
1528 """Create a new cached repo from an existing repo.
1528 """Create a new cached repo from an existing repo.
1529
1529
1530 We assume the passed in repo was recently created. If the
1530 We assume the passed in repo was recently created. If the
1531 repo has changed between when it was created and when it was
1531 repo has changed between when it was created and when it was
1532 turned into a cache, it may not refresh properly.
1532 turned into a cache, it may not refresh properly.
1533 """
1533 """
1534 assert isinstance(repo, localrepo.localrepository)
1534 assert isinstance(repo, localrepo.localrepository)
1535 self._repo = repo
1535 self._repo = repo
1536 self._state, self.mtime = self._repostate()
1536 self._state, self.mtime = self._repostate()
1537 self._filtername = repo.filtername
1537 self._filtername = repo.filtername
1538
1538
1539 def fetch(self):
1539 def fetch(self):
1540 """Refresh (if necessary) and return a repository.
1540 """Refresh (if necessary) and return a repository.
1541
1541
1542 If the cached instance is out of date, it will be recreated
1542 If the cached instance is out of date, it will be recreated
1543 automatically and returned.
1543 automatically and returned.
1544
1544
1545 Returns a tuple of the repo and a boolean indicating whether a new
1545 Returns a tuple of the repo and a boolean indicating whether a new
1546 repo instance was created.
1546 repo instance was created.
1547 """
1547 """
1548 # We compare the mtimes and sizes of some well-known files to
1548 # We compare the mtimes and sizes of some well-known files to
1549 # determine if the repo changed. This is not precise, as mtimes
1549 # determine if the repo changed. This is not precise, as mtimes
1550 # are susceptible to clock skew and imprecise filesystems and
1550 # are susceptible to clock skew and imprecise filesystems and
1551 # file content can change while maintaining the same size.
1551 # file content can change while maintaining the same size.
1552
1552
1553 state, mtime = self._repostate()
1553 state, mtime = self._repostate()
1554 if state == self._state:
1554 if state == self._state:
1555 return self._repo, False
1555 return self._repo, False
1556
1556
1557 repo = repository(self._repo.baseui, self._repo.url())
1557 repo = repository(self._repo.baseui, self._repo.url())
1558 if self._filtername:
1558 if self._filtername:
1559 self._repo = repo.filtered(self._filtername)
1559 self._repo = repo.filtered(self._filtername)
1560 else:
1560 else:
1561 self._repo = repo.unfiltered()
1561 self._repo = repo.unfiltered()
1562 self._state = state
1562 self._state = state
1563 self.mtime = mtime
1563 self.mtime = mtime
1564
1564
1565 return self._repo, True
1565 return self._repo, True
1566
1566
1567 def _repostate(self):
1567 def _repostate(self):
1568 state = []
1568 state = []
1569 maxmtime = -1
1569 maxmtime = -1
1570 for attr, fname in foi:
1570 for attr, fname in foi:
1571 prefix = getattr(self._repo, attr)
1571 prefix = getattr(self._repo, attr)
1572 p = os.path.join(prefix, fname)
1572 p = os.path.join(prefix, fname)
1573 try:
1573 try:
1574 st = os.stat(p)
1574 st = os.stat(p)
1575 except OSError:
1575 except OSError:
1576 st = os.stat(prefix)
1576 st = os.stat(prefix)
1577 state.append((st[stat.ST_MTIME], st.st_size))
1577 state.append((st[stat.ST_MTIME], st.st_size))
1578 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1578 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1579
1579
1580 return tuple(state), maxmtime
1580 return tuple(state), maxmtime
1581
1581
1582 def copy(self):
1582 def copy(self):
1583 """Obtain a copy of this class instance.
1583 """Obtain a copy of this class instance.
1584
1584
1585 A new localrepository instance is obtained. The new instance should be
1585 A new localrepository instance is obtained. The new instance should be
1586 completely independent of the original.
1586 completely independent of the original.
1587 """
1587 """
1588 repo = repository(self._repo.baseui, self._repo.origroot)
1588 repo = repository(self._repo.baseui, self._repo.origroot)
1589 if self._filtername:
1589 if self._filtername:
1590 repo = repo.filtered(self._filtername)
1590 repo = repo.filtered(self._filtername)
1591 else:
1591 else:
1592 repo = repo.unfiltered()
1592 repo = repo.unfiltered()
1593 c = cachedlocalrepo(repo)
1593 c = cachedlocalrepo(repo)
1594 c._state = self._state
1594 c._state = self._state
1595 c.mtime = self.mtime
1595 c.mtime = self.mtime
1596 return c
1596 return c
General Comments 0
You need to be logged in to leave comments. Login now