##// END OF EJS Templates
branching: merge with stable
Raphaël Gomès -
r49418:f13fb742 merge default
parent child Browse files
Show More
@@ -1,1601 +1,1610 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import posixpath
13 import shutil
14 import shutil
14 import stat
15 import stat
15 import weakref
16 import weakref
16
17
17 from .i18n import _
18 from .i18n import _
18 from .node import (
19 from .node import (
19 hex,
20 hex,
20 sha1nodeconstants,
21 sha1nodeconstants,
21 short,
22 short,
22 )
23 )
23 from .pycompat import getattr
24 from .pycompat import getattr
24
25
25 from . import (
26 from . import (
26 bookmarks,
27 bookmarks,
27 bundlerepo,
28 bundlerepo,
28 cmdutil,
29 cmdutil,
29 destutil,
30 destutil,
30 discovery,
31 discovery,
31 error,
32 error,
32 exchange,
33 exchange,
33 extensions,
34 extensions,
34 graphmod,
35 graphmod,
35 httppeer,
36 httppeer,
36 localrepo,
37 localrepo,
37 lock,
38 lock,
38 logcmdutil,
39 logcmdutil,
39 logexchange,
40 logexchange,
40 merge as mergemod,
41 merge as mergemod,
41 mergestate as mergestatemod,
42 mergestate as mergestatemod,
42 narrowspec,
43 narrowspec,
43 phases,
44 phases,
44 requirements,
45 requirements,
45 scmutil,
46 scmutil,
46 sshpeer,
47 sshpeer,
47 statichttprepo,
48 statichttprepo,
48 ui as uimod,
49 ui as uimod,
49 unionrepo,
50 unionrepo,
50 url,
51 url,
51 util,
52 util,
52 verify as verifymod,
53 verify as verifymod,
53 vfs as vfsmod,
54 vfs as vfsmod,
54 )
55 )
55 from .interfaces import repository as repositorymod
56 from .interfaces import repository as repositorymod
56 from .utils import (
57 from .utils import (
57 hashutil,
58 hashutil,
58 stringutil,
59 stringutil,
59 urlutil,
60 urlutil,
60 )
61 )
61
62
62
63
# Convenience alias: allow callers to release locks without importing `lock`.
release = lock.release

# Feature names that a shared repository may share with its source;
# written to .hg/shared in the sharing repo.
sharedbookmarks = b'bookmarks'
67
68
68
69
def _local(path):
    """Return the repository module appropriate for a local *path*.

    A path naming a regular file is assumed to be a bundle and is
    handled by ``bundlerepo``; anything else (directory, missing path)
    is handled by ``localrepo``.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        # path does not exist (or is otherwise unstattable): treat it as
        # a prospective local repository directory.
        isfile = False

    # Conditional expression instead of the legacy `cond and a or b`
    # idiom, which silently misbehaves when `a` is falsy.
    return bundlerepo if isfile else localrepo
88
89
89
90
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names in ``branches`` against ``other``.

    ``branches`` is a ``(hashbranch, branchlist)`` pair. Returns a pair
    ``(revs, checkout)`` where ``revs`` includes the revisions of the
    resolved branches and ``checkout`` is the suggested update target.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing to resolve: pass revisions straight through.
        checkout = revs[0] if revs else None
        return revs or None, checkout

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as executor:
        branchmap = executor.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # '.' refers to the dirstate branch of the local repository.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # hashbranch may be a branch name or a raw revision; if it is not a
    # known branch, fall back to treating it as a revision.
    if hashbranch and not primary(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
132
133
133
134
# Map of URL scheme -> handler. A handler is either a repository module
# (exposing `instance`) or a callable returning one (see `_local`).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
143
144
144
145
def _peerlookup(path):
    """Return the scheme handler responsible for ``path``.

    Unknown schemes fall back to the ``file`` handler. If the handler is
    a callable it is invoked with the path; otherwise the module itself
    is returned.
    """
    u = urlutil.url(path)
    handler = schemes.get(u.scheme or b'file') or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
157
158
158
159
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a bytes path: ask the scheme handler, which may not support the
    # islocal query at all (e.g. a peer factory)
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
167
168
168
169
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    return url.open(ui, path, sendaccept=sendaccept)
176
177
177
178
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
180
181
181
182
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-level) ui; prefer it
    ui = getattr(obj, "ui", ui)
    for setupfunc in presetupfuncs or []:
        setupfunc(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    # run every extension's reposetup hook, timing each one
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # wire peers get their own initialization pass
    if not obj.local():
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
208
209
209
210
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    # a remote peer has no local repository to hand back
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
233
234
234
235
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(rui, path, create, intents=intents, createopts=createopts)
    return obj.peer()
241
242
242
243
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # basename of the normalized path; empty when the URL has no path
    return os.path.basename(os.path.normpath(path)) if path else b''
263
264
264
265
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    # cached on a previous call
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourcepath)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for next time
    return srcrepo
282
283
283
284
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    # only repositories on local disk can share a store
    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    # create the destination as a share of srcrepo's store
    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # reopen to pick up the share-related requirements
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
330
331
331
332
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy if the source has no config
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first so local settings keep precedence
    with dstvfs(b'hgrc', b'wb') as hgrcfile:
        sourceconfig = srcvfs.read(b'hgrc')
        hgrcfile.write(b"# Config copied from shared source\n")
        hgrcfile.write(sourceconfig)
        hgrcfile.write(b'\n')
        hgrcfile.write(currentconfig)
352
353
353
354
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the sharedpath pointer; keep a backup copy
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    wctx = newrepo[b'.']
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).unshare()

    # make further use of the old repo object an explicit error
    localrepo.poisonrepository(repo)

    return newrepo
400
401
401
402
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record the source as the default pull/push path
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
418
419
419
420
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then sensible fallbacks
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
440
441
441
442
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for relpath in srcrepo.store.copylist():
                # publishing repos have no interesting phase data to copy
                if srcpublishing and relpath.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(relpath)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(relpath):
                    if relpath.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, count = util.copyfiles(
                        srcvfs.join(relpath), dstvfs.join(relpath), hardlink, progress
                    )
                    copied += count
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
480
481
481
482
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        resolved = []
        for revspec in rev:
            with srcpeer.commandexecutor() as executor:
                resolved.append(
                    executor.callcommand(
                        b'lookup',
                        {
                            b'key': revspec,
                        },
                    ).result()
                )
        revs = resolved

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
588
589
589
590
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, dstfname)
599
600
600
601
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Interpolate outside of _() so the literal can be
                            # found in the translation catalog (the old
                            # `_(b"..." % update)` form could never match).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # release the *working copy* lock here; the old code released
                # destlock a second time by mistake, leaving the wlock held
                # until the finally block.
                release(destwlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1068
1069
1069
1070
def _showstats(repo, stats, quietempty=False):
    """Print an update/merge summary line for *stats* on repo's ui.

    With ``quietempty`` set, stays silent when nothing happened.
    """
    if quietempty and stats.isempty():
        return
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(template % counts)
1085
1086
1086
1087
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point: kept only for backward compatibility.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # Delegate to the merge module; this is not a branch merge, and the
    # conflict-marker labels identify the two sides for the user.
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1105
1106
1106
1107
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    had_conflicts = stats.unresolvedcount > 0
    if had_conflicts:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when there are unresolved files left behind.
    return had_conflicts
1114
1115
1115
1116
# naming conflict in clone(): its local variable `update` shadows the
# module-level update() function, so clone() calls it via this alias.
_update = update
1118
1119
1119
1120
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # A clean (forced) update discards local changes, so no merge
    # conflicts can remain.
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # Mirror update()'s return convention; a clean update never leaves
    # unresolved files behind.
    return False
1127
1128
1128
1129
# naming conflict in updatetotally(): its parameter `clean` shadows the
# module-level clean() function, so it calls it via this alias.
_clean = clean
1131
1132
# The set of accepted values for the `commands.update.check` config /
# `updatecheck` argument; anything else falls back to a default
# (see updatetotally()).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1138
1139
1139
1140
1140 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1141 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1141 """Update the working directory with extra care for non-file components
1142 """Update the working directory with extra care for non-file components
1142
1143
1143 This takes care of non-file components below:
1144 This takes care of non-file components below:
1144
1145
1145 :bookmark: might be advanced or (in)activated
1146 :bookmark: might be advanced or (in)activated
1146
1147
1147 This takes arguments below:
1148 This takes arguments below:
1148
1149
1149 :checkout: to which revision the working directory is updated
1150 :checkout: to which revision the working directory is updated
1150 :brev: a name, which might be a bookmark to be activated after updating
1151 :brev: a name, which might be a bookmark to be activated after updating
1151 :clean: whether changes in the working directory can be discarded
1152 :clean: whether changes in the working directory can be discarded
1152 :updatecheck: how to deal with a dirty working directory
1153 :updatecheck: how to deal with a dirty working directory
1153
1154
1154 Valid values for updatecheck are the UPDATECHECK_* constants
1155 Valid values for updatecheck are the UPDATECHECK_* constants
1155 defined in the merge module. Passing `None` will result in using the
1156 defined in the merge module. Passing `None` will result in using the
1156 configured default.
1157 configured default.
1157
1158
1158 * ABORT: abort if the working directory is dirty
1159 * ABORT: abort if the working directory is dirty
1159 * NONE: don't check (merge working directory changes into destination)
1160 * NONE: don't check (merge working directory changes into destination)
1160 * LINEAR: check that update is linear before merging working directory
1161 * LINEAR: check that update is linear before merging working directory
1161 changes into destination
1162 changes into destination
1162 * NO_CONFLICT: check that the update does not result in file merges
1163 * NO_CONFLICT: check that the update does not result in file merges
1163
1164
1164 This returns whether conflict is detected at updating or not.
1165 This returns whether conflict is detected at updating or not.
1165 """
1166 """
1166 if updatecheck is None:
1167 if updatecheck is None:
1167 updatecheck = ui.config(b'commands', b'update.check')
1168 updatecheck = ui.config(b'commands', b'update.check')
1168 if updatecheck not in _VALID_UPDATECHECKS:
1169 if updatecheck not in _VALID_UPDATECHECKS:
1169 # If not configured, or invalid value configured
1170 # If not configured, or invalid value configured
1170 updatecheck = mergemod.UPDATECHECK_LINEAR
1171 updatecheck = mergemod.UPDATECHECK_LINEAR
1171 if updatecheck not in _VALID_UPDATECHECKS:
1172 if updatecheck not in _VALID_UPDATECHECKS:
1172 raise ValueError(
1173 raise ValueError(
1173 r'Invalid updatecheck value %r (can accept %r)'
1174 r'Invalid updatecheck value %r (can accept %r)'
1174 % (updatecheck, _VALID_UPDATECHECKS)
1175 % (updatecheck, _VALID_UPDATECHECKS)
1175 )
1176 )
1176 with repo.wlock():
1177 with repo.wlock():
1177 movemarkfrom = None
1178 movemarkfrom = None
1178 warndest = False
1179 warndest = False
1179 if checkout is None:
1180 if checkout is None:
1180 updata = destutil.destupdate(repo, clean=clean)
1181 updata = destutil.destupdate(repo, clean=clean)
1181 checkout, movemarkfrom, brev = updata
1182 checkout, movemarkfrom, brev = updata
1182 warndest = True
1183 warndest = True
1183
1184
1184 if clean:
1185 if clean:
1185 ret = _clean(repo, checkout)
1186 ret = _clean(repo, checkout)
1186 else:
1187 else:
1187 if updatecheck == mergemod.UPDATECHECK_ABORT:
1188 if updatecheck == mergemod.UPDATECHECK_ABORT:
1188 cmdutil.bailifchanged(repo, merge=False)
1189 cmdutil.bailifchanged(repo, merge=False)
1189 updatecheck = mergemod.UPDATECHECK_NONE
1190 updatecheck = mergemod.UPDATECHECK_NONE
1190 ret = _update(repo, checkout, updatecheck=updatecheck)
1191 ret = _update(repo, checkout, updatecheck=updatecheck)
1191
1192
1192 if not ret and movemarkfrom:
1193 if not ret and movemarkfrom:
1193 if movemarkfrom == repo[b'.'].node():
1194 if movemarkfrom == repo[b'.'].node():
1194 pass # no-op update
1195 pass # no-op update
1195 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1196 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1196 b = ui.label(repo._activebookmark, b'bookmarks.active')
1197 b = ui.label(repo._activebookmark, b'bookmarks.active')
1197 ui.status(_(b"updating bookmark %s\n") % b)
1198 ui.status(_(b"updating bookmark %s\n") % b)
1198 else:
1199 else:
1199 # this can happen with a non-linear update
1200 # this can happen with a non-linear update
1200 b = ui.label(repo._activebookmark, b'bookmarks')
1201 b = ui.label(repo._activebookmark, b'bookmarks')
1201 ui.status(_(b"(leaving bookmark %s)\n") % b)
1202 ui.status(_(b"(leaving bookmark %s)\n") % b)
1202 bookmarks.deactivate(repo)
1203 bookmarks.deactivate(repo)
1203 elif brev in repo._bookmarks:
1204 elif brev in repo._bookmarks:
1204 if brev != repo._activebookmark:
1205 if brev != repo._activebookmark:
1205 b = ui.label(brev, b'bookmarks.active')
1206 b = ui.label(brev, b'bookmarks.active')
1206 ui.status(_(b"(activating bookmark %s)\n") % b)
1207 ui.status(_(b"(activating bookmark %s)\n") % b)
1207 bookmarks.activate(repo, brev)
1208 bookmarks.activate(repo, brev)
1208 elif brev:
1209 elif brev:
1209 if repo._activebookmark:
1210 if repo._activebookmark:
1210 b = ui.label(repo._activebookmark, b'bookmarks')
1211 b = ui.label(repo._activebookmark, b'bookmarks')
1211 ui.status(_(b"(leaving bookmark %s)\n") % b)
1212 ui.status(_(b"(leaving bookmark %s)\n") % b)
1212 bookmarks.deactivate(repo)
1213 bookmarks.deactivate(repo)
1213
1214
1214 if warndest:
1215 if warndest:
1215 destutil.statusotherdests(ui, repo)
1216 destutil.statusotherdests(ui, repo)
1216
1217
1217 return ret
1218 return ret
1218
1219
1219
1220
1220 def merge(
1221 def merge(
1221 ctx,
1222 ctx,
1222 force=False,
1223 force=False,
1223 remind=True,
1224 remind=True,
1224 labels=None,
1225 labels=None,
1225 ):
1226 ):
1226 """Branch merge with node, resolving changes. Return true if any
1227 """Branch merge with node, resolving changes. Return true if any
1227 unresolved conflicts."""
1228 unresolved conflicts."""
1228 repo = ctx.repo()
1229 repo = ctx.repo()
1229 stats = mergemod.merge(ctx, force=force, labels=labels)
1230 stats = mergemod.merge(ctx, force=force, labels=labels)
1230 _showstats(repo, stats)
1231 _showstats(repo, stats)
1231 if stats.unresolvedcount:
1232 if stats.unresolvedcount:
1232 repo.ui.status(
1233 repo.ui.status(
1233 _(
1234 _(
1234 b"use 'hg resolve' to retry unresolved file merges "
1235 b"use 'hg resolve' to retry unresolved file merges "
1235 b"or 'hg merge --abort' to abandon\n"
1236 b"or 'hg merge --abort' to abandon\n"
1236 )
1237 )
1237 )
1238 )
1238 elif remind:
1239 elif remind:
1239 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1240 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1240 return stats.unresolvedcount > 0
1241 return stats.unresolvedcount > 0
1241
1242
1242
1243
1243 def abortmerge(ui, repo):
1244 def abortmerge(ui, repo):
1244 ms = mergestatemod.mergestate.read(repo)
1245 ms = mergestatemod.mergestate.read(repo)
1245 if ms.active():
1246 if ms.active():
1246 # there were conflicts
1247 # there were conflicts
1247 node = ms.localctx.hex()
1248 node = ms.localctx.hex()
1248 else:
1249 else:
1249 # there were no conficts, mergestate was not stored
1250 # there were no conficts, mergestate was not stored
1250 node = repo[b'.'].hex()
1251 node = repo[b'.'].hex()
1251
1252
1252 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1253 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1253 stats = mergemod.clean_update(repo[node])
1254 stats = mergemod.clean_update(repo[node])
1254 assert stats.unresolvedcount == 0
1255 assert stats.unresolvedcount == 0
1255 _showstats(repo, stats)
1256 _showstats(repo, stats)
1256
1257
1257
1258
1258 def _incoming(
1259 def _incoming(
1259 displaychlist,
1260 displaychlist,
1260 subreporecurse,
1261 subreporecurse,
1261 ui,
1262 ui,
1262 repo,
1263 repo,
1263 source,
1264 source,
1264 opts,
1265 opts,
1265 buffered=False,
1266 buffered=False,
1266 subpath=None,
1267 subpath=None,
1267 ):
1268 ):
1268 """
1269 """
1269 Helper for incoming / gincoming.
1270 Helper for incoming / gincoming.
1270 displaychlist gets called with
1271 displaychlist gets called with
1271 (remoterepo, incomingchangesetlist, displayer) parameters,
1272 (remoterepo, incomingchangesetlist, displayer) parameters,
1272 and is supposed to contain only code that can't be unified.
1273 and is supposed to contain only code that can't be unified.
1273 """
1274 """
1274 srcs = urlutil.get_pull_paths(repo, ui, [source])
1275 srcs = urlutil.get_pull_paths(repo, ui, [source])
1275 srcs = list(srcs)
1276 srcs = list(srcs)
1276 if len(srcs) != 1:
1277 if len(srcs) != 1:
1277 msg = _(b'for now, incoming supports only a single source, %d provided')
1278 msg = _(b'for now, incoming supports only a single source, %d provided')
1278 msg %= len(srcs)
1279 msg %= len(srcs)
1279 raise error.Abort(msg)
1280 raise error.Abort(msg)
1280 path = srcs[0]
1281 path = srcs[0]
1281 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1282 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1282 if subpath is not None:
1283 if subpath is not None:
1283 subpath = urlutil.url(subpath)
1284 subpath = urlutil.url(subpath)
1284 if subpath.isabs():
1285 if subpath.isabs():
1285 source = bytes(subpath)
1286 source = bytes(subpath)
1286 else:
1287 else:
1287 p = urlutil.url(source)
1288 p = urlutil.url(source)
1288 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1289 if p.islocal():
1290 normpath = os.path.normpath
1291 else:
1292 normpath = posixpath.normpath
1293 p.path = normpath(b'%s/%s' % (p.path, subpath))
1289 source = bytes(p)
1294 source = bytes(p)
1290 other = peer(repo, opts, source)
1295 other = peer(repo, opts, source)
1291 cleanupfn = other.close
1296 cleanupfn = other.close
1292 try:
1297 try:
1293 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1298 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1294 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1299 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1295
1300
1296 if revs:
1301 if revs:
1297 revs = [other.lookup(rev) for rev in revs]
1302 revs = [other.lookup(rev) for rev in revs]
1298 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1303 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1299 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1304 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1300 )
1305 )
1301
1306
1302 if not chlist:
1307 if not chlist:
1303 ui.status(_(b"no changes found\n"))
1308 ui.status(_(b"no changes found\n"))
1304 return subreporecurse()
1309 return subreporecurse()
1305 ui.pager(b'incoming')
1310 ui.pager(b'incoming')
1306 displayer = logcmdutil.changesetdisplayer(
1311 displayer = logcmdutil.changesetdisplayer(
1307 ui, other, opts, buffered=buffered
1312 ui, other, opts, buffered=buffered
1308 )
1313 )
1309 displaychlist(other, chlist, displayer)
1314 displaychlist(other, chlist, displayer)
1310 displayer.close()
1315 displayer.close()
1311 finally:
1316 finally:
1312 cleanupfn()
1317 cleanupfn()
1313 subreporecurse()
1318 subreporecurse()
1314 return 0 # exit code is zero since we found incoming changes
1319 return 0 # exit code is zero since we found incoming changes
1315
1320
1316
1321
1317 def incoming(ui, repo, source, opts, subpath=None):
1322 def incoming(ui, repo, source, opts, subpath=None):
1318 def subreporecurse():
1323 def subreporecurse():
1319 ret = 1
1324 ret = 1
1320 if opts.get(b'subrepos'):
1325 if opts.get(b'subrepos'):
1321 ctx = repo[None]
1326 ctx = repo[None]
1322 for subpath in sorted(ctx.substate):
1327 for subpath in sorted(ctx.substate):
1323 sub = ctx.sub(subpath)
1328 sub = ctx.sub(subpath)
1324 ret = min(ret, sub.incoming(ui, source, opts))
1329 ret = min(ret, sub.incoming(ui, source, opts))
1325 return ret
1330 return ret
1326
1331
1327 def display(other, chlist, displayer):
1332 def display(other, chlist, displayer):
1328 limit = logcmdutil.getlimit(opts)
1333 limit = logcmdutil.getlimit(opts)
1329 if opts.get(b'newest_first'):
1334 if opts.get(b'newest_first'):
1330 chlist.reverse()
1335 chlist.reverse()
1331 count = 0
1336 count = 0
1332 for n in chlist:
1337 for n in chlist:
1333 if limit is not None and count >= limit:
1338 if limit is not None and count >= limit:
1334 break
1339 break
1335 parents = [
1340 parents = [
1336 p for p in other.changelog.parents(n) if p != repo.nullid
1341 p for p in other.changelog.parents(n) if p != repo.nullid
1337 ]
1342 ]
1338 if opts.get(b'no_merges') and len(parents) == 2:
1343 if opts.get(b'no_merges') and len(parents) == 2:
1339 continue
1344 continue
1340 count += 1
1345 count += 1
1341 displayer.show(other[n])
1346 displayer.show(other[n])
1342
1347
1343 return _incoming(
1348 return _incoming(
1344 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1349 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1345 )
1350 )
1346
1351
1347
1352
1348 def _outgoing(ui, repo, dests, opts, subpath=None):
1353 def _outgoing(ui, repo, dests, opts, subpath=None):
1349 out = set()
1354 out = set()
1350 others = []
1355 others = []
1351 for path in urlutil.get_push_paths(repo, ui, dests):
1356 for path in urlutil.get_push_paths(repo, ui, dests):
1352 dest = path.pushloc or path.loc
1357 dest = path.pushloc or path.loc
1353 if subpath is not None:
1358 if subpath is not None:
1354 subpath = urlutil.url(subpath)
1359 subpath = urlutil.url(subpath)
1355 if subpath.isabs():
1360 if subpath.isabs():
1356 dest = bytes(subpath)
1361 dest = bytes(subpath)
1357 else:
1362 else:
1358 p = urlutil.url(dest)
1363 p = urlutil.url(dest)
1359 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1364 if p.islocal():
1365 normpath = os.path.normpath
1366 else:
1367 normpath = posixpath.normpath
1368 p.path = normpath(b'%s/%s' % (p.path, subpath))
1360 dest = bytes(p)
1369 dest = bytes(p)
1361 branches = path.branch, opts.get(b'branch') or []
1370 branches = path.branch, opts.get(b'branch') or []
1362
1371
1363 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1372 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1364 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1373 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1365 if revs:
1374 if revs:
1366 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1375 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1367
1376
1368 other = peer(repo, opts, dest)
1377 other = peer(repo, opts, dest)
1369 try:
1378 try:
1370 outgoing = discovery.findcommonoutgoing(
1379 outgoing = discovery.findcommonoutgoing(
1371 repo, other, revs, force=opts.get(b'force')
1380 repo, other, revs, force=opts.get(b'force')
1372 )
1381 )
1373 o = outgoing.missing
1382 o = outgoing.missing
1374 out.update(o)
1383 out.update(o)
1375 if not o:
1384 if not o:
1376 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1385 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1377 others.append(other)
1386 others.append(other)
1378 except: # re-raises
1387 except: # re-raises
1379 other.close()
1388 other.close()
1380 raise
1389 raise
1381 # make sure this is ordered by revision number
1390 # make sure this is ordered by revision number
1382 outgoing_revs = list(out)
1391 outgoing_revs = list(out)
1383 cl = repo.changelog
1392 cl = repo.changelog
1384 outgoing_revs.sort(key=cl.rev)
1393 outgoing_revs.sort(key=cl.rev)
1385 return outgoing_revs, others
1394 return outgoing_revs, others
1386
1395
1387
1396
1388 def _outgoing_recurse(ui, repo, dests, opts):
1397 def _outgoing_recurse(ui, repo, dests, opts):
1389 ret = 1
1398 ret = 1
1390 if opts.get(b'subrepos'):
1399 if opts.get(b'subrepos'):
1391 ctx = repo[None]
1400 ctx = repo[None]
1392 for subpath in sorted(ctx.substate):
1401 for subpath in sorted(ctx.substate):
1393 sub = ctx.sub(subpath)
1402 sub = ctx.sub(subpath)
1394 ret = min(ret, sub.outgoing(ui, dests, opts))
1403 ret = min(ret, sub.outgoing(ui, dests, opts))
1395 return ret
1404 return ret
1396
1405
1397
1406
1398 def _outgoing_filter(repo, revs, opts):
1407 def _outgoing_filter(repo, revs, opts):
1399 """apply revision filtering/ordering option for outgoing"""
1408 """apply revision filtering/ordering option for outgoing"""
1400 limit = logcmdutil.getlimit(opts)
1409 limit = logcmdutil.getlimit(opts)
1401 no_merges = opts.get(b'no_merges')
1410 no_merges = opts.get(b'no_merges')
1402 if opts.get(b'newest_first'):
1411 if opts.get(b'newest_first'):
1403 revs.reverse()
1412 revs.reverse()
1404 if limit is None and not no_merges:
1413 if limit is None and not no_merges:
1405 for r in revs:
1414 for r in revs:
1406 yield r
1415 yield r
1407 return
1416 return
1408
1417
1409 count = 0
1418 count = 0
1410 cl = repo.changelog
1419 cl = repo.changelog
1411 for n in revs:
1420 for n in revs:
1412 if limit is not None and count >= limit:
1421 if limit is not None and count >= limit:
1413 break
1422 break
1414 parents = [p for p in cl.parents(n) if p != repo.nullid]
1423 parents = [p for p in cl.parents(n) if p != repo.nullid]
1415 if no_merges and len(parents) == 2:
1424 if no_merges and len(parents) == 2:
1416 continue
1425 continue
1417 count += 1
1426 count += 1
1418 yield n
1427 yield n
1419
1428
1420
1429
1421 def outgoing(ui, repo, dests, opts, subpath=None):
1430 def outgoing(ui, repo, dests, opts, subpath=None):
1422 if opts.get(b'graph'):
1431 if opts.get(b'graph'):
1423 logcmdutil.checkunsupportedgraphflags([], opts)
1432 logcmdutil.checkunsupportedgraphflags([], opts)
1424 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1433 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1425 ret = 1
1434 ret = 1
1426 try:
1435 try:
1427 if o:
1436 if o:
1428 ret = 0
1437 ret = 0
1429
1438
1430 if opts.get(b'graph'):
1439 if opts.get(b'graph'):
1431 revdag = logcmdutil.graphrevs(repo, o, opts)
1440 revdag = logcmdutil.graphrevs(repo, o, opts)
1432 ui.pager(b'outgoing')
1441 ui.pager(b'outgoing')
1433 displayer = logcmdutil.changesetdisplayer(
1442 displayer = logcmdutil.changesetdisplayer(
1434 ui, repo, opts, buffered=True
1443 ui, repo, opts, buffered=True
1435 )
1444 )
1436 logcmdutil.displaygraph(
1445 logcmdutil.displaygraph(
1437 ui, repo, revdag, displayer, graphmod.asciiedges
1446 ui, repo, revdag, displayer, graphmod.asciiedges
1438 )
1447 )
1439 else:
1448 else:
1440 ui.pager(b'outgoing')
1449 ui.pager(b'outgoing')
1441 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1450 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1442 for n in _outgoing_filter(repo, o, opts):
1451 for n in _outgoing_filter(repo, o, opts):
1443 displayer.show(repo[n])
1452 displayer.show(repo[n])
1444 displayer.close()
1453 displayer.close()
1445 for oth in others:
1454 for oth in others:
1446 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1455 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1447 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1456 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1448 return ret # exit code is zero since we found outgoing changes
1457 return ret # exit code is zero since we found outgoing changes
1449 finally:
1458 finally:
1450 for oth in others:
1459 for oth in others:
1451 oth.close()
1460 oth.close()
1452
1461
1453
1462
1454 def verify(repo, level=None):
1463 def verify(repo, level=None):
1455 """verify the consistency of a repository"""
1464 """verify the consistency of a repository"""
1456 ret = verifymod.verify(repo, level=level)
1465 ret = verifymod.verify(repo, level=level)
1457
1466
1458 # Broken subrepo references in hidden csets don't seem worth worrying about,
1467 # Broken subrepo references in hidden csets don't seem worth worrying about,
1459 # since they can't be pushed/pulled, and --hidden can be used if they are a
1468 # since they can't be pushed/pulled, and --hidden can be used if they are a
1460 # concern.
1469 # concern.
1461
1470
1462 # pathto() is needed for -R case
1471 # pathto() is needed for -R case
1463 revs = repo.revs(
1472 revs = repo.revs(
1464 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1473 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1465 )
1474 )
1466
1475
1467 if revs:
1476 if revs:
1468 repo.ui.status(_(b'checking subrepo links\n'))
1477 repo.ui.status(_(b'checking subrepo links\n'))
1469 for rev in revs:
1478 for rev in revs:
1470 ctx = repo[rev]
1479 ctx = repo[rev]
1471 try:
1480 try:
1472 for subpath in ctx.substate:
1481 for subpath in ctx.substate:
1473 try:
1482 try:
1474 ret = (
1483 ret = (
1475 ctx.sub(subpath, allowcreate=False).verify() or ret
1484 ctx.sub(subpath, allowcreate=False).verify() or ret
1476 )
1485 )
1477 except error.RepoError as e:
1486 except error.RepoError as e:
1478 repo.ui.warn(b'%d: %s\n' % (rev, e))
1487 repo.ui.warn(b'%d: %s\n' % (rev, e))
1479 except Exception:
1488 except Exception:
1480 repo.ui.warn(
1489 repo.ui.warn(
1481 _(b'.hgsubstate is corrupt in revision %s\n')
1490 _(b'.hgsubstate is corrupt in revision %s\n')
1482 % short(ctx.node())
1491 % short(ctx.node())
1483 )
1492 )
1484
1493
1485 return ret
1494 return ret
1486
1495
1487
1496
1488 def remoteui(src, opts):
1497 def remoteui(src, opts):
1489 """build a remote ui from ui or repo and opts"""
1498 """build a remote ui from ui or repo and opts"""
1490 if util.safehasattr(src, b'baseui'): # looks like a repository
1499 if util.safehasattr(src, b'baseui'): # looks like a repository
1491 dst = src.baseui.copy() # drop repo-specific config
1500 dst = src.baseui.copy() # drop repo-specific config
1492 src = src.ui # copy target options from repo
1501 src = src.ui # copy target options from repo
1493 else: # assume it's a global ui object
1502 else: # assume it's a global ui object
1494 dst = src.copy() # keep all global options
1503 dst = src.copy() # keep all global options
1495
1504
1496 # copy ssh-specific options
1505 # copy ssh-specific options
1497 for o in b'ssh', b'remotecmd':
1506 for o in b'ssh', b'remotecmd':
1498 v = opts.get(o) or src.config(b'ui', o)
1507 v = opts.get(o) or src.config(b'ui', o)
1499 if v:
1508 if v:
1500 dst.setconfig(b"ui", o, v, b'copied')
1509 dst.setconfig(b"ui", o, v, b'copied')
1501
1510
1502 # copy bundle-specific options
1511 # copy bundle-specific options
1503 r = src.config(b'bundle', b'mainreporoot')
1512 r = src.config(b'bundle', b'mainreporoot')
1504 if r:
1513 if r:
1505 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1514 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1506
1515
1507 # copy selected local settings to the remote ui
1516 # copy selected local settings to the remote ui
1508 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1517 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1509 for key, val in src.configitems(sect):
1518 for key, val in src.configitems(sect):
1510 dst.setconfig(sect, key, val, b'copied')
1519 dst.setconfig(sect, key, val, b'copied')
1511 v = src.config(b'web', b'cacerts')
1520 v = src.config(b'web', b'cacerts')
1512 if v:
1521 if v:
1513 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1522 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1514
1523
1515 return dst
1524 return dst
1516
1525
1517
1526
1518 # Files of interest
1527 # Files of interest
1519 # Used to check if the repository has changed looking at mtime and size of
1528 # Used to check if the repository has changed looking at mtime and size of
1520 # these files.
1529 # these files.
1521 foi = [
1530 foi = [
1522 (b'spath', b'00changelog.i'),
1531 (b'spath', b'00changelog.i'),
1523 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1532 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1524 (b'spath', b'obsstore'),
1533 (b'spath', b'obsstore'),
1525 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1534 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1526 ]
1535 ]
1527
1536
1528
1537
1529 class cachedlocalrepo(object):
1538 class cachedlocalrepo(object):
1530 """Holds a localrepository that can be cached and reused."""
1539 """Holds a localrepository that can be cached and reused."""
1531
1540
1532 def __init__(self, repo):
1541 def __init__(self, repo):
1533 """Create a new cached repo from an existing repo.
1542 """Create a new cached repo from an existing repo.
1534
1543
1535 We assume the passed in repo was recently created. If the
1544 We assume the passed in repo was recently created. If the
1536 repo has changed between when it was created and when it was
1545 repo has changed between when it was created and when it was
1537 turned into a cache, it may not refresh properly.
1546 turned into a cache, it may not refresh properly.
1538 """
1547 """
1539 assert isinstance(repo, localrepo.localrepository)
1548 assert isinstance(repo, localrepo.localrepository)
1540 self._repo = repo
1549 self._repo = repo
1541 self._state, self.mtime = self._repostate()
1550 self._state, self.mtime = self._repostate()
1542 self._filtername = repo.filtername
1551 self._filtername = repo.filtername
1543
1552
1544 def fetch(self):
1553 def fetch(self):
1545 """Refresh (if necessary) and return a repository.
1554 """Refresh (if necessary) and return a repository.
1546
1555
1547 If the cached instance is out of date, it will be recreated
1556 If the cached instance is out of date, it will be recreated
1548 automatically and returned.
1557 automatically and returned.
1549
1558
1550 Returns a tuple of the repo and a boolean indicating whether a new
1559 Returns a tuple of the repo and a boolean indicating whether a new
1551 repo instance was created.
1560 repo instance was created.
1552 """
1561 """
1553 # We compare the mtimes and sizes of some well-known files to
1562 # We compare the mtimes and sizes of some well-known files to
1554 # determine if the repo changed. This is not precise, as mtimes
1563 # determine if the repo changed. This is not precise, as mtimes
1555 # are susceptible to clock skew and imprecise filesystems and
1564 # are susceptible to clock skew and imprecise filesystems and
1556 # file content can change while maintaining the same size.
1565 # file content can change while maintaining the same size.
1557
1566
1558 state, mtime = self._repostate()
1567 state, mtime = self._repostate()
1559 if state == self._state:
1568 if state == self._state:
1560 return self._repo, False
1569 return self._repo, False
1561
1570
1562 repo = repository(self._repo.baseui, self._repo.url())
1571 repo = repository(self._repo.baseui, self._repo.url())
1563 if self._filtername:
1572 if self._filtername:
1564 self._repo = repo.filtered(self._filtername)
1573 self._repo = repo.filtered(self._filtername)
1565 else:
1574 else:
1566 self._repo = repo.unfiltered()
1575 self._repo = repo.unfiltered()
1567 self._state = state
1576 self._state = state
1568 self.mtime = mtime
1577 self.mtime = mtime
1569
1578
1570 return self._repo, True
1579 return self._repo, True
1571
1580
1572 def _repostate(self):
1581 def _repostate(self):
1573 state = []
1582 state = []
1574 maxmtime = -1
1583 maxmtime = -1
1575 for attr, fname in foi:
1584 for attr, fname in foi:
1576 prefix = getattr(self._repo, attr)
1585 prefix = getattr(self._repo, attr)
1577 p = os.path.join(prefix, fname)
1586 p = os.path.join(prefix, fname)
1578 try:
1587 try:
1579 st = os.stat(p)
1588 st = os.stat(p)
1580 except OSError:
1589 except OSError:
1581 st = os.stat(prefix)
1590 st = os.stat(prefix)
1582 state.append((st[stat.ST_MTIME], st.st_size))
1591 state.append((st[stat.ST_MTIME], st.st_size))
1583 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1592 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1584
1593
1585 return tuple(state), maxmtime
1594 return tuple(state), maxmtime
1586
1595
1587 def copy(self):
1596 def copy(self):
1588 """Obtain a copy of this class instance.
1597 """Obtain a copy of this class instance.
1589
1598
1590 A new localrepository instance is obtained. The new instance should be
1599 A new localrepository instance is obtained. The new instance should be
1591 completely independent of the original.
1600 completely independent of the original.
1592 """
1601 """
1593 repo = repository(self._repo.baseui, self._repo.origroot)
1602 repo = repository(self._repo.baseui, self._repo.origroot)
1594 if self._filtername:
1603 if self._filtername:
1595 repo = repo.filtered(self._filtername)
1604 repo = repo.filtered(self._filtername)
1596 else:
1605 else:
1597 repo = repo.unfiltered()
1606 repo = repo.unfiltered()
1598 c = cachedlocalrepo(repo)
1607 c = cachedlocalrepo(repo)
1599 c._state = self._state
1608 c._state = self._state
1600 c.mtime = self.mtime
1609 c.mtime = self.mtime
1601 return c
1610 return c
General Comments 0
You need to be logged in to leave comments. Login now