verify: introduce a notion of "level"...
marmoute - r42331:57539e5e default

@@ -1,1234 +1,1234 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    pycompat,
    repository as repositorymod,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(_('invalid path %s: %s') % (
            path, pycompat.bytestr(e)))

    return isfile and bundlerepo or localrepo

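# Illustrative note (not part of the changeset): _local() picks the
# repository module for a filesystem path; a hedged sketch of the dispatch,
# with made-up paths:
#
#   _local(b'/some/repo')      -> localrepo  (path is a directory)
#   _local(b'/some/bundle.hg') -> bundlerepo (path is an existing file)
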
def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

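# Hedged usage sketch (values are assumptions, not from this changeset):
# given branches=(hashbranch, branchlist) as produced by parseurl(), this
# returns (revs, checkout) where checkout is the first resolved revision:
#
#   revs, checkout = addbranchrevs(repo, repo, (None, []), [b'tip'])
#   # -> ([b'tip'], b'tip')
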
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

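# Hedged doctest-style example (the URL is an assumed illustration):
#
#   parseurl(b'https://example.org/repo#stable')
#   # -> (b'https://example.org/repo', (b'stable', []))
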
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

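# A hedged sketch of how the scheme dispatch above resolves (the paths are
# assumed examples):
#
#   _peerlookup(b'ssh://host/repo') -> the sshpeer module
#   _peerlookup(b'/local/path')     -> result of _local(): localrepo or
#                                      bundlerepo
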
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

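# Hedged examples (paths are assumptions, not from the changeset):
#
#   islocal(b'/path/to/repo')            -> True
#   islocal(b'https://example.org/repo') -> False (assuming the peer module
#                                           has no islocal(), so the
#                                           AttributeError branch is taken)
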
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b'  > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

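# Hedged sketch of the three entry points above (names are as defined in
# this module; the paths are assumed examples):
#
#   repo = repository(ui, b'/path/to/repo')  # local repo, 'visible' filtered
#   p = peer(ui, {}, b'ssh://host/repo')     # wire peer for a remote
#   obj = _peerorrepo(ui, path)              # raw object, peer or repo
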
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r

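# Hedged usage sketch (paths are assumed examples): share() creates `dest`
# with its store pointing at `source`'s store instead of copying it:
#
#   r = share(ui, b'/path/to/src', dest=b'/path/to/share', update=True)
#   # r is the new repository object; its .hg/sharedpath points at src
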
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            sharefile = repo.vfs.join('sharedpath')
            util.rename(sharefile, sharefile + '.old')

            repo.requirements.discard('shared')
            repo.requirements.discard('relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo

def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

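# Hedged usage sketch (assumed path): callers must release the returned
# lock once the destination repository is fully initialized:
#
#   destlock = copystore(ui, srcrepo, b'/path/to/dest/.hg')
#   try:
#       ...  # finish setting up the destination
#   finally:
#       release(destlock)
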
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None, depth=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

    if depth:
        createopts['shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts['lfs'] = True

        if extensions.disabledext('lfs'):
            ui.status(_('(remote is using large file support (lfs), but it is '
                        'explicitly disabled in the local configuration)\n'))
        else:
            ui.status(_('(remote is using large file support (lfs); lfs will '
                        'be enabled for this repository)\n'))

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats,
                                  depth=depth)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

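# Hedged sketch (illustrative): updaterepo() is the thin wrapper the
# helpers below build on, e.g.:
#
#   stats = updaterepo(repo, repo[b'tip'].node(), overwrite=False)
#   # stats.unresolvedcount > 0 means merge conflicts were left behind
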
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
907 if updatecheck is None:
907 if updatecheck is None:
908 updatecheck = ui.config('commands', 'update.check')
908 updatecheck = ui.config('commands', 'update.check')
909 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
909 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
910 # If not configured, or invalid value configured
910 # If not configured, or invalid value configured
911 updatecheck = 'linear'
911 updatecheck = 'linear'
912 with repo.wlock():
912 with repo.wlock():
913 movemarkfrom = None
913 movemarkfrom = None
914 warndest = False
914 warndest = False
915 if checkout is None:
915 if checkout is None:
916 updata = destutil.destupdate(repo, clean=clean)
916 updata = destutil.destupdate(repo, clean=clean)
917 checkout, movemarkfrom, brev = updata
917 checkout, movemarkfrom, brev = updata
918 warndest = True
918 warndest = True
919
919
920 if clean:
920 if clean:
921 ret = _clean(repo, checkout)
921 ret = _clean(repo, checkout)
922 else:
922 else:
923 if updatecheck == 'abort':
923 if updatecheck == 'abort':
924 cmdutil.bailifchanged(repo, merge=False)
924 cmdutil.bailifchanged(repo, merge=False)
925 updatecheck = 'none'
925 updatecheck = 'none'
926 ret = _update(repo, checkout, updatecheck=updatecheck)
926 ret = _update(repo, checkout, updatecheck=updatecheck)
927
927
928 if not ret and movemarkfrom:
928 if not ret and movemarkfrom:
929 if movemarkfrom == repo['.'].node():
929 if movemarkfrom == repo['.'].node():
930 pass # no-op update
930 pass # no-op update
931 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
931 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
932 b = ui.label(repo._activebookmark, 'bookmarks.active')
932 b = ui.label(repo._activebookmark, 'bookmarks.active')
933 ui.status(_("updating bookmark %s\n") % b)
933 ui.status(_("updating bookmark %s\n") % b)
934 else:
934 else:
935 # this can happen with a non-linear update
935 # this can happen with a non-linear update
936 b = ui.label(repo._activebookmark, 'bookmarks')
936 b = ui.label(repo._activebookmark, 'bookmarks')
937 ui.status(_("(leaving bookmark %s)\n") % b)
937 ui.status(_("(leaving bookmark %s)\n") % b)
938 bookmarks.deactivate(repo)
938 bookmarks.deactivate(repo)
939 elif brev in repo._bookmarks:
939 elif brev in repo._bookmarks:
940 if brev != repo._activebookmark:
940 if brev != repo._activebookmark:
941 b = ui.label(brev, 'bookmarks.active')
941 b = ui.label(brev, 'bookmarks.active')
942 ui.status(_("(activating bookmark %s)\n") % b)
942 ui.status(_("(activating bookmark %s)\n") % b)
943 bookmarks.activate(repo, brev)
943 bookmarks.activate(repo, brev)
944 elif brev:
944 elif brev:
945 if repo._activebookmark:
945 if repo._activebookmark:
946 b = ui.label(repo._activebookmark, 'bookmarks')
946 b = ui.label(repo._activebookmark, 'bookmarks')
947 ui.status(_("(leaving bookmark %s)\n") % b)
947 ui.status(_("(leaving bookmark %s)\n") % b)
948 bookmarks.deactivate(repo)
948 bookmarks.deactivate(repo)
949
949
950 if warndest:
950 if warndest:
951 destutil.statusotherdests(ui, repo)
951 destutil.statusotherdests(ui, repo)
952
952
953 return ret
953 return ret
954
954
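# Editor's illustration (not part of this change): updatecheck normally comes
# from the commands.update.check config knob; a minimal sketch, with the
# repo/ui objects assumed to exist as in the earlier illustration:
#
#     u.setconfig('commands', 'update.check', 'noconflict')
#     hadconflicts = hg.updatetotally(u, repo, checkout=None, brev=None)
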
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, branchmerge=True, force=force,
                                mergeforce=mergeforce, labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

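# Editor's illustration (not part of this change): with abort=True the node
# argument is ignored (it is recomputed from the merge state), so aborting an
# in-progress merge can be sketched as:
#
#     hadconflicts = hg.merge(repo, None, abort=True)
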
def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

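# Editor's illustration (not part of this change): _incoming() reads
# opts["bundle"] and opts["force"] unconditionally, so API callers must
# supply them; a minimal sketch with a hypothetical remote URL:
#
#     opts = {'bundle': None, 'force': False, 'rev': [], 'branch': None}
#     hg.incoming(u, repo, 'https://example.com/repo', opts)
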
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

-def verify(repo):
+def verify(repo, level=None):
    """verify the consistency of a repository"""
-    ret = verifymod.verify(repo)
+    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

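# Editor's illustration (not part of this change): the new level argument is
# simply threaded through to verifymod.verify(); given the default handling
# in verify.py below, these two calls are equivalent:
#
#     from mercurial import verify as verifymod
#     hg.verify(repo)
#     hg.verify(repo, level=verifymod.VERIFY_DEFAULT)
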
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed, by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
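# Editor's illustration (not part of this change): a minimal sketch of the
# cachedlocalrepo protocol, assuming `repo` is a freshly opened
# localrepository:
#
#     cached = cachedlocalrepo(repo)
#     r, created = cached.fetch()   # created is False while the `foi` files
#                                   # keep the same mtime and size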
@@ -1,535 +1,541
# verify.py - repository integrity checking for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os

from .i18n import _
from .node import (
    nullid,
    short,
)

from . import (
    error,
    pycompat,
    revlog,
    util,
)

-def verify(repo):
+VERIFY_DEFAULT = 0
+
+def verify(repo, level=None):
    with repo.lock():
-        return verifier(repo).verify()
+        v = verifier(repo, level)
+        return v.verify()

def _normpath(f):
    # under hg < 2.4, convert didn't sanitize paths properly, so a
    # converted repo may contain repeated slashes
    while '//' in f:
        f = f.replace('//', '/')
    return f

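# Editor's illustration (not part of this change): _normpath() only collapses
# repeated slashes, e.g.:
#
#     _normpath('foo//bar///baz')  ->  'foo/bar/baz'
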
class verifier(object):
-    def __init__(self, repo):
+    def __init__(self, repo, level=None):
        self.repo = repo.unfiltered()
        self.ui = repo.ui
        self.match = repo.narrowmatch()
+        if level is None:
+            level = VERIFY_DEFAULT
+        self._level = level
        self.badrevs = set()
        self.errors = 0
        self.warnings = 0
        self.havecl = len(repo.changelog) > 0
        self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
        self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
        self.lrugetctx = util.lrucachefunc(repo.__getitem__)
        self.refersmf = False
        self.fncachewarned = False
        # developer config: verify.skipflags
        self.skipflags = repo.ui.configint('verify', 'skipflags')
        self.warnorphanstorefiles = True

    def _warn(self, msg):
        """record a "warning" level issue"""
        self.ui.warn(msg + "\n")
        self.warnings += 1

    def _err(self, linkrev, msg, filename=None):
        """record an "error" level issue"""
        if linkrev is not None:
            self.badrevs.add(linkrev)
            linkrev = "%d" % linkrev
        else:
            linkrev = '?'
        msg = "%s: %s" % (linkrev, msg)
        if filename:
            msg = "%s@%s" % (filename, msg)
        self.ui.warn(" " + msg + "\n")
        self.errors += 1

    def _exc(self, linkrev, msg, inst, filename=None):
        """record an exception raised during the verify process"""
        fmsg = pycompat.bytestr(inst)
        if not fmsg:
            fmsg = pycompat.byterepr(inst)
        self._err(linkrev, "%s: %s" % (msg, fmsg), filename)

    def _checkrevlog(self, obj, name, linkrev):
        """verify high-level properties of a revlog

        - revlog is present,
        - revlog is non-empty,
        - sizes (index and data) are correct,
        - revlog's format version is correct.
        """
        if not len(obj) and (self.havecl or self.havemf):
            self._err(linkrev, _("empty or missing %s") % name)
            return

        d = obj.checksize()
        if d[0]:
            self._err(None, _("data length off by %d bytes") % d[0], name)
        if d[1]:
            self._err(None, _("index contains %d extra bytes") % d[1], name)

        if obj.version != revlog.REVLOGV0:
            if not self.revlogv1:
                self._warn(_("warning: `%s' uses revlog format 1") % name)
            elif self.revlogv1:
                self._warn(_("warning: `%s' uses revlog format 0") % name)

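# Editor's illustration (not part of this change): checksize() feeds the
# d[0]/d[1] checks above; it returns a (datadiff, indexdiff) pair, where
# anything non-zero means the revlog files disagree with their index, e.g.:
#
#     d = repo.changelog.checksize()
#     assert d == (0, 0)   # expected for a healthy revlog
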
    def _checkentry(self, obj, i, node, seen, linkrevs, f):
        """verify a single revlog entry

        arguments are:
        - obj: the source revlog
        - i: the revision number
        - node: the revision node id
        - seen: nodes previously seen for this revlog
        - linkrevs: [changelog-revisions] introducing "node"
        - f: string label ("changelog", "manifest", or filename)

        Performs the following checks:
        - linkrev points to an existing changelog revision,
        - linkrev points to a changelog revision that introduces this revision,
        - linkrev points to the lowest of these changesets,
        - both parents exist in the revlog,
        - the revision is not duplicated.

        Return the linkrev of the revision (or None for changelog's revisions).
        """
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (self.havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(self.repo.changelog):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            self._err(None, msg % (i, lr), f)
            if linkrevs:
                if f and len(linkrevs) > 1:
                    try:
                        # attempt to filter down to real linkrevs
                        linkrevs = [l for l in linkrevs
                                    if self.lrugetctx(l)[f].filenode() == node]
                    except Exception:
                        pass
                self._warn(_(" (expected %s)") % " ".join
                           (map(pycompat.bytestr, linkrevs)))
            lr = None # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                self._err(lr, _("unknown parent 1 %s of %s") %
                          (short(p1), short(node)), f)
            if p2 not in seen and p2 != nullid:
                self._err(lr, _("unknown parent 2 %s of %s") %
                          (short(p2), short(node)), f)
        except Exception as inst:
            self._exc(lr, _("checking parents of %s") % short(node), inst, f)

        if node in seen:
            self._err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
        seen[node] = i
        return lr

    def verify(self):
        """verify the content of the Mercurial repository

        This method runs all verifications, displaying issues as they are
        found.

        Return 1 if any errors have been encountered, 0 otherwise."""
        # initial validation and generic report
        repo = self.repo
        ui = repo.ui
        if not repo.url().startswith('file:'):
            raise error.Abort(_("cannot verify bundle or remote repos"))

        if os.path.exists(repo.sjoin("journal")):
            ui.warn(_("abandoned transaction found - run hg recover\n"))

        if ui.verbose or not self.revlogv1:
            ui.status(_("repository uses revlog format %d\n") %
                      (self.revlogv1 and 1 or 0))

        # data verification
        mflinkrevs, filelinkrevs = self._verifychangelog()
        filenodes = self._verifymanifest(mflinkrevs)
        del mflinkrevs
        self._crosscheckfiles(filelinkrevs, filenodes)
        totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)

        # final report
        ui.status(_("checked %d changesets with %d changes to %d files\n") %
                  (len(repo.changelog), filerevisions, totalfiles))
        if self.warnings:
            ui.warn(_("%d warnings encountered!\n") % self.warnings)
        if self.fncachewarned:
            ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
                      'corrupt fncache\n'))
        if self.errors:
            ui.warn(_("%d integrity errors encountered!\n") % self.errors)
            if self.badrevs:
                ui.warn(_("(first damaged changeset appears to be %d)\n")
                        % min(self.badrevs))
            return 1
        return 0

    def _verifychangelog(self):
        """verify the changelog of a repository

        The following checks are performed:
        - all of `_checkrevlog` checks,
        - all of `_checkentry` checks (for each revision),
        - each revision can be read.

        The function returns some of the data observed in the changesets as a
        (mflinkrevs, filelinkrevs) tuple:
        - mflinkrevs: is a { manifest-node -> [changelog-rev] } mapping
        - filelinkrevs: is a { file-path -> [changelog-rev] } mapping

        If a matcher was specified, filelinkrevs will only contain matched
        files.
        """
        ui = self.ui
        repo = self.repo
        match = self.match
        cl = repo.changelog

        ui.status(_("checking changesets\n"))
        mflinkrevs = {}
        filelinkrevs = {}
        seen = {}
        self._checkrevlog(cl, "changelog", 0)
        progress = ui.makeprogress(_('checking'), unit=_('changesets'),
                                   total=len(repo))
        for i in repo:
            progress.update(i)
            n = cl.node(i)
            self._checkentry(cl, i, n, seen, [i], "changelog")

            try:
                changes = cl.read(n)
                if changes[0] != nullid:
                    mflinkrevs.setdefault(changes[0], []).append(i)
                    self.refersmf = True
                for f in changes[3]:
                    if match(f):
                        filelinkrevs.setdefault(_normpath(f), []).append(i)
            except Exception as inst:
                self.refersmf = True
                self._exc(i, _("unpacking changeset %s") % short(n), inst)
        progress.complete()
        return mflinkrevs, filelinkrevs

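# Editor's illustration (not part of this change): changelog.read() returns a
# tuple whose first item is the manifest node and whose fourth is the list of
# touched files, which is what changes[0] and changes[3] above index into:
#
#     manifest, user, date, files, desc, extra = cl.read(n)
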
    def _verifymanifest(self, mflinkrevs, dir="", storefiles=None,
                        subdirprogress=None):
        """verify the manifestlog content

        Inputs:
        - mflinkrevs: a {manifest-node -> [changelog-revisions]} mapping
        - dir: a subdirectory to check (for tree manifest repo)
        - storefiles: set of currently "orphan" files.
        - subdirprogress: a progress object

        This function checks:
        * all of `_checkrevlog` checks (for all manifest related revlogs)
        * all of `_checkentry` checks (for all manifest related revisions)
        * nodes for subdirectories exist in the sub-directory manifest
        * each manifest entry has a file path
        * each manifest node referred to in mflinkrevs exists in the manifest
          log

        If tree manifests are in use and a matcher is specified, only the
        sub-directories matching it will be verified.

        return a two level mapping:
            {"path" -> { filenode -> changelog-revision}}

        This mapping primarily contains entries for every file in the
        repository. In addition, when tree-manifest is used, it also contains
        sub-directory entries.

        If a matcher is provided, only matching paths will be included.
        """
        repo = self.repo
        ui = self.ui
        match = self.match
        mfl = self.repo.manifestlog
        mf = mfl.getstorage(dir)

        if not dir:
            self.ui.status(_("checking manifests\n"))

        filenodes = {}
        subdirnodes = {}
        seen = {}
        label = "manifest"
        if dir:
            label = dir
            revlogfiles = mf.files()
            storefiles.difference_update(revlogfiles)
            if subdirprogress: # should be true since we're in a subdirectory
                subdirprogress.increment()
        if self.refersmf:
            # Do not check manifest if there are only changelog entries with
            # null manifests.
            self._checkrevlog(mf, label, 0)
        progress = ui.makeprogress(_('checking'), unit=_('manifests'),
                                   total=len(mf))
        for i in mf:
            if not dir:
                progress.update(i)
            n = mf.node(i)
            lr = self._checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label)
            if n in mflinkrevs:
                del mflinkrevs[n]
            elif dir:
                self._err(lr, _("%s not in parent-directory manifest") %
                          short(n), label)
            else:
                self._err(lr, _("%s not in changesets") % short(n), label)

            try:
                mfdelta = mfl.get(dir, n).readdelta(shallow=True)
                for f, fn, fl in mfdelta.iterentries():
                    if not f:
                        self._err(lr, _("entry without name in manifest"))
                    elif f == "/dev/null": # ignore this in very old repos
                        continue
                    fullpath = dir + _normpath(f)
                    if fl == 't':
                        if not match.visitdir(fullpath):
                            continue
                        subdirnodes.setdefault(fullpath + '/', {}).setdefault(
                            fn, []).append(lr)
                    else:
                        if not match(fullpath):
                            continue
                        filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
            except Exception as inst:
                self._exc(lr, _("reading delta %s") % short(n), inst, label)
        if not dir:
            progress.complete()

        if self.havemf:
            # since we delete entries in `mflinkrevs` during iteration, any
            # remaining entries are "missing". We need to issue errors for
            # them.
            changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]]
            for c, m in sorted(changesetpairs):
                if dir:
                    self._err(c, _("parent-directory manifest refers to unknown"
                                   " revision %s") % short(m), label)
                else:
                    self._err(c, _("changeset refers to unknown revision %s") %
                              short(m), label)

        if not dir and subdirnodes:
            self.ui.status(_("checking directory manifests\n"))
            storefiles = set()
            subdirs = set()
            revlogv1 = self.revlogv1
            for f, f2, size in repo.store.datafiles():
                if not f:
                    self._err(None, _("cannot decode filename '%s'") % f2)
                elif (size > 0 or not revlogv1) and f.startswith('meta/'):
                    storefiles.add(_normpath(f))
                    subdirs.add(os.path.dirname(f))
            subdirprogress = ui.makeprogress(_('checking'), unit=_('manifests'),
                                             total=len(subdirs))

        for subdir, linkrevs in subdirnodes.iteritems():
            subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles,
                                                   subdirprogress)
            for f, onefilenodes in subdirfilenodes.iteritems():
                filenodes.setdefault(f, {}).update(onefilenodes)

        if not dir and subdirnodes:
            subdirprogress.complete()
            if self.warnorphanstorefiles:
                for f in sorted(storefiles):
                    self._warn(_("warning: orphan data file '%s'") % f)

        return filenodes

    def _crosscheckfiles(self, filelinkrevs, filenodes):
        repo = self.repo
        ui = self.ui
        ui.status(_("crosschecking files in changesets and manifests\n"))

        total = len(filelinkrevs) + len(filenodes)
        progress = ui.makeprogress(_('crosschecking'), unit=_('files'),
                                   total=total)
        if self.havemf:
            for f in sorted(filelinkrevs):
                progress.increment()
                if f not in filenodes:
                    lr = filelinkrevs[f][0]
                    self._err(lr, _("in changeset but not in manifest"), f)

        if self.havecl:
            for f in sorted(filenodes):
                progress.increment()
                if f not in filelinkrevs:
                    try:
                        fl = repo.file(f)
                        lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                    except Exception:
                        lr = None
                    self._err(lr, _("in manifest but not in changeset"), f)

        progress.complete()

    def _verifyfiles(self, filenodes, filelinkrevs):
        repo = self.repo
        ui = self.ui
        lrugetctx = self.lrugetctx
        revlogv1 = self.revlogv1
        havemf = self.havemf
        ui.status(_("checking files\n"))

        storefiles = set()
        for f, f2, size in repo.store.datafiles():
            if not f:
                self._err(None, _("cannot decode filename '%s'") % f2)
            elif (size > 0 or not revlogv1) and f.startswith('data/'):
                storefiles.add(_normpath(f))

        state = {
            # TODO this assumes revlog storage for changelog.
            'expectedversion': self.repo.changelog.version & 0xFFFF,
            'skipflags': self.skipflags,
            # experimental config: censor.policy
            'erroroncensored': ui.config('censor', 'policy') == 'abort',
        }

        files = sorted(set(filenodes) | set(filelinkrevs))
        revisions = 0
        progress = ui.makeprogress(_('checking'), unit=_('files'),
                                   total=len(files))
        for i, f in enumerate(files):
            progress.update(i, item=f)
            try:
                linkrevs = filelinkrevs[f]
            except KeyError:
                # in manifest but not in changelog
                linkrevs = []

            if linkrevs:
                lr = linkrevs[0]
            else:
                lr = None

            try:
                fl = repo.file(f)
            except error.StorageError as e:
                self._err(lr, _("broken revlog! (%s)") % e, f)
                continue

            for ff in fl.files():
                try:
                    storefiles.remove(ff)
                except KeyError:
                    if self.warnorphanstorefiles:
                        self._warn(_(" warning: revlog '%s' not in fncache!") %
                                   ff)
                        self.fncachewarned = True

            if not len(fl) and (self.havecl or self.havemf):
                self._err(lr, _("empty or missing %s") % f)
            else:
                # Guard against implementations not setting this.
                state['skipread'] = set()
                for problem in fl.verifyintegrity(state):
                    if problem.node is not None:
                        linkrev = fl.linkrev(fl.rev(problem.node))
                    else:
                        linkrev = None

                    if problem.warning:
                        self._warn(problem.warning)
                    elif problem.error:
                        self._err(linkrev if linkrev is not None else lr,
                                  problem.error, f)
                    else:
                        raise error.ProgrammingError(
                            'problem instance does not set warning or error '
                            'attribute: %s' % problem.msg)

            seen = {}
            for i in fl:
                revisions += 1
                n = fl.node(i)
                lr = self._checkentry(fl, i, n, seen, linkrevs, f)
                if f in filenodes:
                    if havemf and n not in filenodes[f]:
                        self._err(lr, _("%s not in manifests") % (short(n)), f)
                    else:
                        del filenodes[f][n]

                if n in state['skipread']:
                    continue

                # check renames
                try:
                    # This requires resolving fulltext (at least on revlogs).
                    # We may want ``verifyintegrity()`` to pass a set of nodes
                    # with rename metadata as an optimization.
                    rp = fl.renamed(n)
                    if rp:
                        if lr is not None and ui.verbose:
                            ctx = lrugetctx(lr)
                            if not any(rp[0] in pctx for pctx in ctx.parents()):
                                self._warn(_("warning: copy source of '%s' not"
                                             " in parents of %s") % (f, ctx))
                        fl2 = repo.file(rp[0])
                        if not len(fl2):
                            self._err(lr,
                                      _("empty or missing copy source revlog "
                                        "%s:%s") % (rp[0], short(rp[1])),
                                      f)
                        elif rp[1] == nullid:
                            ui.note(_("warning: %s@%s: copy source"
                                      " revision is nullid %s:%s\n")
                                    % (f, lr, rp[0], short(rp[1])))
                        else:
                            fl2.rev(rp[1])
                except Exception as inst:
                    self._exc(lr, _("checking rename of %s") % short(n),
                              inst, f)

            # cross-check
            if f in filenodes:
                fns = [(v, k) for k, v in filenodes[f].iteritems()]
                for lr, node in sorted(fns):
                    self._err(lr, _("manifest refers to unknown revision %s") %
                              short(node), f)
        progress.complete()

        if self.warnorphanstorefiles:
            for f in sorted(storefiles):
                self._warn(_("warning: orphan data file '%s'") % f)

        return len(files), revisions