peer: stop having a `peer()` method on `peer()`...
marmoute - r50643:78af51ba default
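
This changeset drops a leftover unconditional `source.peer()` call in `clone()`. Since peer objects no longer carry a `peer()` method, `clone()` may only call `source.peer()` when `source` is a local repository; the `util.safehasattr(source, 'peer')` guard a few lines above (the same dispatch used by `addbranchrevs()`) already handles that, leaving the two removed lines redundant. A minimal sketch of the surviving dispatch pattern, with `source` standing in for either a local repository or an already-constructed peer:

    # Sketch only, mirroring the guarded dispatch this change keeps:
    # normalize `source` (localrepo or peer) to a peer exactly once.
    if util.safehasattr(source, 'peer'):
        srcpeer = source.peer()  # a localrepo: ask it for a peer view
    else:
        srcpeer = source  # already a peer; it no longer has .peer()
    origsource = source = srcpeer.url()
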
@@ -1,1669 +1,1667 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import os
import posixpath
import shutil
import stat
import weakref

from .i18n import _
from .node import (
    hex,
    sha1nodeconstants,
    short,
)
from .pycompat import getattr

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    graphmod,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    mergestate as mergestatemod,
    narrowspec,
    phases,
    requirements,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)
from .interfaces import repository as repositorymod
from .utils import (
    hashutil,
    stringutil,
    urlutil,
)


release = lock.release

# shared features
sharedbookmarks = b'bookmarks'

def addbranchrevs(lrepo, other, branches, revs):
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def _isfile(path):
    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
    except ValueError as e:
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    else:
        return stat.S_ISREG(st.st_mode)


class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)


repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}

def _peerlookup(path):
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        return peer_schemes[scheme]
    if scheme in repo_schemes:
        return repo_schemes[scheme]
    return LocalFactory


def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()


def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)


# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)


def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer


def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo


def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r


def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo


def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)


def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise

def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)


# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if os.path.exists(srcfname):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcfname, dstfname)

def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if util.safehasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
-       srcpeer = source.peer()  # in case we were called with a localrepo
-       branches = (None, branch or [])
        origsource = source = srcpeer.url()
729 srclock = destlock = destwlock = cleandir = None
727 srclock = destlock = destwlock = cleandir = None
730 destpeer = None
728 destpeer = None
731 try:
729 try:
732 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
730 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
733
731
734 if dest is None:
732 if dest is None:
735 dest = defaultdest(source)
733 dest = defaultdest(source)
736 if dest:
734 if dest:
737 ui.status(_(b"destination directory: %s\n") % dest)
735 ui.status(_(b"destination directory: %s\n") % dest)
738 else:
736 else:
739 dest_path = urlutil.get_clone_path_obj(ui, dest)
737 dest_path = urlutil.get_clone_path_obj(ui, dest)
740 if dest_path is not None:
738 if dest_path is not None:
741 dest = dest_path.rawloc
739 dest = dest_path.rawloc
742 else:
740 else:
743 dest = b''
741 dest = b''
744
742
745 dest = urlutil.urllocalpath(dest)
743 dest = urlutil.urllocalpath(dest)
746 source = urlutil.urllocalpath(source)
744 source = urlutil.urllocalpath(source)
747
745
748 if not dest:
746 if not dest:
749 raise error.InputError(_(b"empty destination path is not valid"))
747 raise error.InputError(_(b"empty destination path is not valid"))
750
748
751 destvfs = vfsmod.vfs(dest, expandpath=True)
749 destvfs = vfsmod.vfs(dest, expandpath=True)
752 if destvfs.lexists():
750 if destvfs.lexists():
753 if not destvfs.isdir():
751 if not destvfs.isdir():
754 raise error.InputError(
752 raise error.InputError(
755 _(b"destination '%s' already exists") % dest
753 _(b"destination '%s' already exists") % dest
756 )
754 )
757 elif destvfs.listdir():
755 elif destvfs.listdir():
758 raise error.InputError(
756 raise error.InputError(
759 _(b"destination '%s' is not empty") % dest
757 _(b"destination '%s' is not empty") % dest
760 )
758 )
761
759
762 createopts = {}
760 createopts = {}
763 narrow = False
761 narrow = False
764
762
765 if storeincludepats is not None:
763 if storeincludepats is not None:
766 narrowspec.validatepatterns(storeincludepats)
764 narrowspec.validatepatterns(storeincludepats)
767 narrow = True
765 narrow = True
768
766
769 if storeexcludepats is not None:
767 if storeexcludepats is not None:
770 narrowspec.validatepatterns(storeexcludepats)
768 narrowspec.validatepatterns(storeexcludepats)
771 narrow = True
769 narrow = True
772
770
773 if narrow:
771 if narrow:
774 # Include everything by default if only exclusion patterns defined.
772 # Include everything by default if only exclusion patterns defined.
775 if storeexcludepats and not storeincludepats:
773 if storeexcludepats and not storeincludepats:
776 storeincludepats = {b'path:.'}
774 storeincludepats = {b'path:.'}
777
775
778 createopts[b'narrowfiles'] = True
776 createopts[b'narrowfiles'] = True
779
777
780 if depth:
778 if depth:
781 createopts[b'shallowfilestore'] = True
779 createopts[b'shallowfilestore'] = True
782
780
783 if srcpeer.capable(b'lfs-serve'):
781 if srcpeer.capable(b'lfs-serve'):
784 # Repository creation honors the config if it disabled the extension, so
782 # Repository creation honors the config if it disabled the extension, so
785 # we can't just announce that lfs will be enabled. This check avoids
783 # we can't just announce that lfs will be enabled. This check avoids
786 # saying that lfs will be enabled, and then saying it's an unknown
784 # saying that lfs will be enabled, and then saying it's an unknown
787 # feature. The lfs creation option is set in either case so that a
785 # feature. The lfs creation option is set in either case so that a
788 # requirement is added. If the extension is explicitly disabled but the
786 # requirement is added. If the extension is explicitly disabled but the
789 # requirement is set, the clone aborts early, before transferring any
787 # requirement is set, the clone aborts early, before transferring any
790 # data.
788 # data.
791 createopts[b'lfs'] = True
789 createopts[b'lfs'] = True
792
790
793 if extensions.disabled_help(b'lfs'):
791 if extensions.disabled_help(b'lfs'):
794 ui.status(
792 ui.status(
795 _(
793 _(
796 b'(remote is using large file support (lfs), but it is '
794 b'(remote is using large file support (lfs), but it is '
797 b'explicitly disabled in the local configuration)\n'
795 b'explicitly disabled in the local configuration)\n'
798 )
796 )
799 )
797 )
800 else:
798 else:
801 ui.status(
799 ui.status(
802 _(
800 _(
803 b'(remote is using large file support (lfs); lfs will '
801 b'(remote is using large file support (lfs); lfs will '
804 b'be enabled for this repository)\n'
802 b'be enabled for this repository)\n'
805 )
803 )
806 )
804 )
807
805
808 shareopts = shareopts or {}
806 shareopts = shareopts or {}
809 sharepool = shareopts.get(b'pool')
807 sharepool = shareopts.get(b'pool')
810 sharenamemode = shareopts.get(b'mode')
808 sharenamemode = shareopts.get(b'mode')
811 if sharepool and islocal(dest):
809 if sharepool and islocal(dest):
812 sharepath = None
810 sharepath = None
813 if sharenamemode == b'identity':
811 if sharenamemode == b'identity':
814 # Resolve the name from the initial changeset in the remote
812 # Resolve the name from the initial changeset in the remote
815 # repository. This returns nullid when the remote is empty. It
813 # repository. This returns nullid when the remote is empty. It
816 # raises RepoLookupError if revision 0 is filtered or otherwise
814 # raises RepoLookupError if revision 0 is filtered or otherwise
817 # not available. If we fail to resolve, sharing is not enabled.
815 # not available. If we fail to resolve, sharing is not enabled.
818 try:
816 try:
819 with srcpeer.commandexecutor() as e:
817 with srcpeer.commandexecutor() as e:
820 rootnode = e.callcommand(
818 rootnode = e.callcommand(
821 b'lookup',
819 b'lookup',
822 {
820 {
823 b'key': b'0',
821 b'key': b'0',
824 },
822 },
825 ).result()
823 ).result()
826
824
827 if rootnode != sha1nodeconstants.nullid:
825 if rootnode != sha1nodeconstants.nullid:
828 sharepath = os.path.join(sharepool, hex(rootnode))
826 sharepath = os.path.join(sharepool, hex(rootnode))
829 else:
827 else:
830 ui.status(
828 ui.status(
831 _(
829 _(
832 b'(not using pooled storage: '
830 b'(not using pooled storage: '
833 b'remote appears to be empty)\n'
831 b'remote appears to be empty)\n'
834 )
832 )
835 )
833 )
836 except error.RepoLookupError:
834 except error.RepoLookupError:
837 ui.status(
835 ui.status(
838 _(
836 _(
839 b'(not using pooled storage: '
837 b'(not using pooled storage: '
840 b'unable to resolve identity of remote)\n'
838 b'unable to resolve identity of remote)\n'
841 )
839 )
842 )
840 )
843 elif sharenamemode == b'remote':
841 elif sharenamemode == b'remote':
844 sharepath = os.path.join(
842 sharepath = os.path.join(
845 sharepool, hex(hashutil.sha1(source).digest())
843 sharepool, hex(hashutil.sha1(source).digest())
846 )
844 )
847 else:
845 else:
848 raise error.Abort(
846 raise error.Abort(
849 _(b'unknown share naming mode: %s') % sharenamemode
847 _(b'unknown share naming mode: %s') % sharenamemode
850 )
848 )
851
849
852 # TODO this is a somewhat arbitrary restriction.
850 # TODO this is a somewhat arbitrary restriction.
853 if narrow:
851 if narrow:
854 ui.status(
852 ui.status(
855 _(b'(pooled storage not supported for narrow clones)\n')
853 _(b'(pooled storage not supported for narrow clones)\n')
856 )
854 )
857 sharepath = None
855 sharepath = None
858
856
859 if sharepath:
857 if sharepath:
860 return clonewithshare(
858 return clonewithshare(
861 ui,
859 ui,
862 peeropts,
860 peeropts,
863 sharepath,
861 sharepath,
864 source,
862 source,
865 srcpeer,
863 srcpeer,
866 dest,
864 dest,
867 pull=pull,
865 pull=pull,
868 rev=revs,
866 rev=revs,
869 update=update,
867 update=update,
870 stream=stream,
868 stream=stream,
871 )
869 )
872
870
873 srcrepo = srcpeer.local()
871 srcrepo = srcpeer.local()
874
872
875 abspath = origsource
873 abspath = origsource
876 if islocal(origsource):
874 if islocal(origsource):
877 abspath = util.abspath(urlutil.urllocalpath(origsource))
875 abspath = util.abspath(urlutil.urllocalpath(origsource))
878
876
879 if islocal(dest):
877 if islocal(dest):
880 if os.path.exists(dest):
878 if os.path.exists(dest):
881 # only clean up directories we create ourselves
879 # only clean up directories we create ourselves
882 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
880 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
883 cleandir = hgdir
881 cleandir = hgdir
884 else:
882 else:
885 cleandir = dest
883 cleandir = dest
886
884
887 copy = False
885 copy = False
888 if (
886 if (
889 srcrepo
887 srcrepo
890 and srcrepo.cancopy()
888 and srcrepo.cancopy()
891 and islocal(dest)
889 and islocal(dest)
892 and not phases.hassecret(srcrepo)
890 and not phases.hassecret(srcrepo)
893 ):
891 ):
894 copy = not pull and not revs
892 copy = not pull and not revs
895
893
896 # TODO this is a somewhat arbitrary restriction.
894 # TODO this is a somewhat arbitrary restriction.
897 if narrow:
895 if narrow:
898 copy = False
896 copy = False
899
897
900 if copy:
898 if copy:
901 try:
899 try:
902 # we use a lock here because if we race with commit, we
900 # we use a lock here because if we race with commit, we
903 # can end up with extra data in the cloned revlogs that's
901 # can end up with extra data in the cloned revlogs that's
904 # not pointed to by changesets, thus causing verify to
902 # not pointed to by changesets, thus causing verify to
905 # fail
903 # fail
906 srclock = srcrepo.lock(wait=False)
904 srclock = srcrepo.lock(wait=False)
907 except error.LockError:
905 except error.LockError:
908 copy = False
906 copy = False
909
907
910 if copy:
908 if copy:
911 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
909 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
912
910
913 destrootpath = urlutil.urllocalpath(dest)
911 destrootpath = urlutil.urllocalpath(dest)
914 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
912 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
915 localrepo.createrepository(
913 localrepo.createrepository(
916 ui,
914 ui,
917 destrootpath,
915 destrootpath,
918 requirements=dest_reqs,
916 requirements=dest_reqs,
919 )
917 )
920 destrepo = localrepo.makelocalrepository(ui, destrootpath)
918 destrepo = localrepo.makelocalrepository(ui, destrootpath)
921
919
922 destwlock = destrepo.wlock()
920 destwlock = destrepo.wlock()
923 destlock = destrepo.lock()
921 destlock = destrepo.lock()
924 from . import streamclone # avoid cycle
922 from . import streamclone # avoid cycle
925
923
926 streamclone.local_copy(srcrepo, destrepo)
924 streamclone.local_copy(srcrepo, destrepo)
927
925
928 # we need to re-init the repo after manually copying the data
926 # we need to re-init the repo after manually copying the data
929 # into it
927 # into it
930 destpeer = peer(srcrepo, peeropts, dest)
928 destpeer = peer(srcrepo, peeropts, dest)
931
929
932 # make the peer aware that is it already locked
930 # make the peer aware that is it already locked
933 #
931 #
934 # important:
932 # important:
935 #
933 #
936 # We still need to release that lock at the end of the function
934 # We still need to release that lock at the end of the function
937 destpeer.local()._lockref = weakref.ref(destlock)
935 destpeer.local()._lockref = weakref.ref(destlock)
938 destpeer.local()._wlockref = weakref.ref(destwlock)
936 destpeer.local()._wlockref = weakref.ref(destwlock)
939 # dirstate also needs to be copied because `_wlockref` has a reference
937 # dirstate also needs to be copied because `_wlockref` has a reference
940 # to it: this dirstate is saved to disk when the wlock is released
938 # to it: this dirstate is saved to disk when the wlock is released
941 destpeer.local().dirstate = destrepo.dirstate
939 destpeer.local().dirstate = destrepo.dirstate
942
940
943 srcrepo.hook(
941 srcrepo.hook(
944 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
942 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
945 )
943 )
946 else:
944 else:
947 try:
945 try:
948 # only pass ui when no srcrepo
946 # only pass ui when no srcrepo
949 destpeer = peer(
947 destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

        if revs:
            if not srcpeer.capable(b'lookup'):
                raise error.Abort(
                    _(
                        b"src repository does not support "
                        b"revision lookup and so doesn't "
                        b"support clone by revision"
                    )
                )

            # TODO this is batchable.
            remoterevs = []
            for rev in revs:
                with srcpeer.commandexecutor() as e:
                    remoterevs.append(
                        e.callcommand(
                            b'lookup',
                            {
                                b'key': rev,
                            },
                        ).result()
                    )
            revs = remoterevs

            checkout = revs[0]
        else:
            revs = None
        local = destpeer.local()
        if local:
            if narrow:
                with local.wlock(), local.lock():
                    local.setnarrowpats(storeincludepats, storeexcludepats)
                    narrowspec.copytoworkingcopy(local)

            u = urlutil.url(abspath)
            defaulturl = bytes(u)
            local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
            if not stream:
                if pull:
                    stream = False
                else:
                    stream = None
            # internal config: ui.quietbookmarkmove
            overrides = {(b'ui', b'quietbookmarkmove'): True}
            with local.ui.configoverride(overrides, b'clone'):
                exchange.pull(
                    local,
                    srcpeer,
                    heads=revs,
                    streamclonerequested=stream,
                    includepats=storeincludepats,
                    excludepats=storeexcludepats,
                    depth=depth,
                )
        elif srcrepo:
            # TODO lift restriction once exchange.push() accepts narrow
            # push.
            if narrow:
                raise error.Abort(
                    _(
                        b'narrow clone not available for '
                        b'remote destinations'
                    )
                )

            exchange.push(
                srcrepo,
                destpeer,
                revs=revs,
                bookmarks=srcrepo._bookmarks.keys(),
            )
        else:
            raise error.Abort(
                _(b"clone from remote to remote not supported")
            )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                release(destwlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
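
# Editor's note: illustrative sketch, not part of the original module.
# The body above is the tail of clone(), the machinery behind `hg clone`.
# Assuming only a loaded ui, a programmatic call could look like:
#
#     from mercurial import hg, ui as uimod
#
#     u = uimod.ui.load()
#     srcpeer, destpeer = hg.clone(
#         u, {}, b'https://example.com/repo', dest=b'/tmp/repo-copy'
#     )
#
# The argument layout is inferred from the parameter names used above
# (revs, update, stream, ...); treat it as a sketch, not a stable API.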


def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )


def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged
    into the working directory.

    Returns stats (see pydoc mercurial.merge.applyupdates)."""
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
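
# Editor's note: per the deprecation warning above, new code should call the
# merge module directly instead of hg.updaterepo(). A minimal sketch:
#
#     from mercurial import merge as mergemod
#
#     stats = mergemod.update(repo[node])        # merge-style update
#     stats = mergemod.clean_update(repo[node])  # discard local changes
#
# (repo and node are assumed to exist in the caller's scope.)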


def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0


# naming conflict in clone()
_update = update


def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean

_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}


def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether a conflict was detected while updating.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
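
# Editor's note: illustrative sketch, not part of the original module.
# A command implementation typically resolves the user-supplied revision
# first and then lets updatetotally() handle the bookmark bookkeeping:
#
#     rev = scmutil.revsingle(repo, revspec, default=None).rev()
#     had_conflicts = updatetotally(ui, repo, rev, revspec, clean=False)
#
# revspec here stands for a hypothetical bytes revision/bookmark name taken
# from the command line.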


def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts remain."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0


def abortmerge(ui, repo):
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)


def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes


def incoming(ui, repo, source, opts, subpath=None):
    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )


def _outgoing(ui, repo, dests, opts, subpath=None):
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others


def _outgoing_recurse(ui, repo, dests, opts):
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret


def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n


def outgoing(ui, repo, dests, opts, subpath=None):
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero if we found outgoing changes
    finally:
        for oth in others:
            oth.close()


def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret


def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
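
# Editor's note: illustrative sketch, not part of the original module.
# Peer construction uses remoteui() to derive a ui for the wire connection:
#
#     remote_ui = remoteui(repo, {b'ssh': b'ssh -C'})
#
# Repo-local configuration is dropped while ssh/auth/proxy settings are
# carried over, so the connection never sees unrelated per-repository config.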


# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]


class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
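
# Editor's note: illustrative sketch, not part of the original module.
# A long-lived server process (hgweb is the typical consumer) can hold a
# cachedlocalrepo and revalidate it once per request:
#
#     cached = cachedlocalrepo(repo)
#     ...
#     repo, fresh = cached.fetch()  # fresh is True if the repo was reloaded
#
# This relies on the coarse mtime/size heuristic documented in fetch().
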
@@ -1,645 +1,642 b''
# httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno
import io
import os
import socket
import struct

from concurrent import futures
from .i18n import _
from .pycompat import getattr
from . import (
    bundle2,
    error,
    httpconnection,
    pycompat,
    statichttprepo,
    url as urlmod,
    util,
    wireprotov1peer,
)
from .utils import urlutil

httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq


def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and
    values as native strings.
    """
    # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
    # not bytes. This function always takes bytes in as arguments.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the last bit here is a bytestring
    # and not a unicode: we're just getting the encoded length anyway,
    # and using an r-string to make it portable between Python 2 and 3
    # doesn't work because then the \r is a literal backslash-r
    # instead of a carriage return.
    valuelen = limit - len(fmt % '000') - len(b': \r\n')
    result = []

    n = 0
    for i in range(0, len(value), valuelen):
        n += 1
        result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))

    return result
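
# Editor's note: illustrative sketch, not part of the original module.
# With limit=30 the per-header value budget works out to 15 bytes
# (30 - len('X-HgArg-000') - len(': \r\n')), so an 18-byte argument
# string splits into two numbered headers:
#
#     encodevalueinheaders(b'cmd=lookup&key=tip', b'X-HgArg', 30)
#     # -> [('X-HgArg-1', 'cmd=lookup&key='), ('X-HgArg-2', 'tip')]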


class _multifile:
    def __init__(self, *fileobjs):
        for f in fileobjs:
            if not util.safehasattr(f, b'length'):
                raise ValueError(
                    b'_multifile only supports file objects that '
                    b'have a length but this one does not:',
                    type(f),
                    f,
                )
        self._fileobjs = fileobjs
        self._index = 0

    @property
    def length(self):
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        if amt is None or amt <= 0:
            return b''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                self._index += 1
            amt -= got
        return b''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                b'_multifile does not support anything other'
                b' than os.SEEK_SET for whence on seek()'
            )
        if offset != 0:
            raise NotImplementedError(
                b'_multifile only supports seeking to start, but that '
                b'could be fixed if you need it'
            )
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0
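
# Editor's note: illustrative sketch, not part of the original module.
# _multifile presents several length-annotated file objects as one readable
# stream (used below to prepend encoded arguments to a POST body):
#
#     a, b = io.BytesIO(b'foo'), io.BytesIO(b'barbaz')
#     a.length, b.length = 3, 6
#     mf = _multifile(a, b)
#     assert mf.length == 9
#     assert mf.read(5) == b'fooba'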


def makev1commandrequest(
    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd`` and ``args`` define the command and its arguments; raw data to
    send with the request is passed via ``args[b'data']``.
    """
    if cmd == b'pushkey':
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            headersize = int(httpheader.split(b',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encoded_args = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(
                encoded_args, b'X-HgArg', headersize
            ):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, b'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

        protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers['Vary'] = ','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug(b"sending %d bytes\n" % size)
        req.add_unredirected_header('Content-Length', '%d' % size)

    return req, cu, qs
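
# Editor's note: illustrative sketch, not part of the original module.
# The HTTP peer drives this function roughly as follows (caps and opener
# come from the handshake; the exact names here are assumptions):
#
#     req, cu, qs = makev1commandrequest(
#         ui, urlreq.request, caps, peer.capable,
#         b'https://example.com/repo', b'lookup', {b'key': b'tip'},
#     )
#     resp = sendrequest(ui, opener, req)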


def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.
    """
    dbg = ui.debug
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b'  %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b'  %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b'  %d bytes of data' % length)

        start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            code = res.code if res else -1
            dbg(
                line
                % b'  finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res
304
304
305
305
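# Sketch of the logging pattern above with stdlib stand-ins (util.timer is
# assumed to behave like time.monotonic): the elapsed time and status code
# are reported even when opener.open() raises, thanks to the finally block.
import time


def _timed_open(opener, req, log):
    start = time.monotonic()
    res = None
    try:
        res = opener.open(req)
        return res
    finally:
        code = res.code if res else -1
        log('finished in %.4f seconds (%d)' % (time.monotonic() - start, code))
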
class RedirectedRepoError(error.RepoError):
    def __init__(self, msg, respurl):
        super(RedirectedRepoError, self).__init__(msg)
        self.respurl = respurl


def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split(b'-', 1)[1]

        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp

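# Example with a hypothetical header value: how parsev1commandresponse()
# maps the Content-Type onto a protocol version tuple.
_proto = b'application/mercurial-0.2'
_subtype = _proto.split(b'-', 1)[1]  # b'0.2'
_version_info = tuple(int(n) for n in _subtype.split(b'.'))
assert _version_info == (0, 2)
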
class httppeer(wireprotov1peer.wirepeer):
    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        self.ui = ui
        self._path = path
        self._url = url
        self._caps = caps
        self.limitedarguments = caps is not None and b'httppostargs' not in caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        return None

-    def peer(self):
-        return self
-
    def canpush(self):
        return True

    def close(self):
        try:
            reqs, sent, recv = (
                self._urlopener.requestscount,
                self._urlopener.sentbytescount,
                self._urlopener.receivedbytescount,
            )
        except AttributeError:
            return
        self.ui.note(
            _(
                b'(sent %d HTTP requests and %d bytes; '
                b'received %d bytes in responses)\n'
            )
            % (reqs, sent, recv)
        )

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(
            self.ui,
            self._requestbuilder,
            self._caps,
            self.capable,
            self._url,
            cmd,
            args,
        )

        resp = sendrequest(self.ui, self._urlopener, req)

        self._url, ct, resp = parsev1commandresponse(
            self.ui, self._url, cu, qs, resp, _compressible
        )

        return resp

    def _call(self, cmd, **args):
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable(b'unbundle')
        try:
            types = types.split(b',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [b""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split(b'\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_(b"unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_(b'push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            with os.fdopen(fd, "wb") as fh:
                d = fp.read(4096)
                while d:
                    fh.write(d)
                    d = fp.read(4096)
            # start http push
            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
                headers = {'Content-Type': 'application/mercurial-0.1'}
                return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if filename is not None:
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception

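# Example with hypothetical capability values: _callpush() above must accept
# both the modern comma-separated 'unbundle' capability and the bare boolean
# form sent by servers older than d1b16a746db6.
def _bundletypes(cap):
    try:
        return cap.split(b',')
    except AttributeError:
        return [b'']  # boolean capability: headerless bundles only


assert _bundletypes(b'HG10GZ,HG10BZ,HG10UN') == [b'HG10GZ', b'HG10BZ', b'HG10UN']
assert _bundletypes(True) == [b'']
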
class queuedcommandfuture(futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)

def performhandshake(ui, url, opener, requestbuilder):
    # The handshake is a request to the capabilities command.

    caps = None

    def capable(x):
        raise error.ProgrammingError(b'should not be called')

    args = {}

    req, requrl, qs = makev1commandrequest(
        ui, requestbuilder, caps, capable, url, b'capabilities', args
    )
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith(b'application/mercurial-'):
        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)

    info = {b'v1capabilities': set(rawdata.split())}

    return respurl, info

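# Example with a hypothetical handshake payload: the raw 'capabilities'
# response body is a space-separated blob, which performhandshake() reduces
# to a set of capability strings.
_rawdata = b'lookup branchmap unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024'
_caps = set(_rawdata.split())
assert b'lookup' in _caps and b'httpheader=1024' in _caps
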
def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections and perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.
    """
    u = urlutil.url(path)
    if u.query or u.fragment:
        raise error.Abort(
            _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
        )

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = u.authinfo()
    ui.debug(b'using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    return httppeer(
        ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
    )


def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_(b'cannot create new http repository'))
    try:
        if path.startswith(b'https:') and not urlmod.has_https:
            raise error.Abort(
                _(b'Python support for SSL and HTTPS is not installed')
            )

        inst = makepeer(ui, path)

        return inst
    except error.RepoError as httpexception:
        try:
            r = statichttprepo.instance(ui, b"static-" + path, create)
            ui.note(_(b'(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception  # use the original http RepoError instead
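# Usage sketch (hypothetical URL; performs a real capabilities handshake, so
# it needs network access): makepeer()/instance() is how callers obtain an
# HTTP peer.
from mercurial import ui as uimod


def _example():
    myui = uimod.ui.load()
    remote = makepeer(myui, b'https://hg.example.com/repo')
    try:
        return sorted(remote.capabilities())
    finally:
        remote.close()
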
@@ -1,2059 +1,2053 b''
# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


from ..i18n import _
from .. import error
from . import util as interfaceutil

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Repository supports (at least) some sidedata to be stored.
REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)

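# Example: REVISION_FLAGS_KNOWN is the mask of storage flags this code
# understands; consumers typically reject anything outside it.
_flags = REVISION_FLAG_CENSORED | REVISION_FLAG_HASCOPIESINFO
assert not (_flags & ~REVISION_FLAGS_KNOWN)
assert bool(_flags & REVISION_FLAG_CENSORED)
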
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps of all known repoview filter-levels
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps of the repoview filter-level used by the server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm tags' cache for the default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm tags' cache for the repoview filter-level used by the server
CACHE_TAGS_SERVED = b"tags-served"

# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)

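# Example of hypothetical extension code: the CACHES_* sets are deliberately
# mutable so an extension can opt extra caches into the default
# post-transaction warm-up.
CACHES_DEFAULT.add(CACHE_CHANGELOG_CACHE)
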
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

-    def peer():
-        """Returns an object conforming to this interface.
-
-        Most implementations will ``return self``.
-        """
-
    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

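# Sketch of a minimal (hypothetical) ipeerconnection implementation: a remote
# peer returns None from local() and cleans up transport state in close().
class _democonnection:
    ui = None  # a real implementation carries a ui.ui instance

    def url(self):
        return b'https://hg.example.com/repo'

    def local(self):
        return None  # not backed by a local repository

    def canpush(self):
        return False

    def close(self):
        pass  # release sockets, file handles, etc.
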
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

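# Usage sketch (assumes an already-constructed peer): the methods above
# mirror wire protocol commands of the same name.
def _remote_tip(remote):
    node = remote.lookup(b'tip')  # binary node of the remote tip
    assert list(remote.known([node])) == [True]
    return node
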
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

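# Usage sketch for the contract above (assumes a v1 peer): queue every
# command for the round trip before resolving any future.
def _heads_and_branchmap(remote):
    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_bmap = e.callcommand(b'branchmap', {})
        return f_heads.result(), f_bmap.result()
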
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument value for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer:
    """Base class for peer repositories."""

    limitedarguments = False

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )

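# Example with a hypothetical capability set: capable() distinguishes boolean
# capabilities from valued ones.
class _demopeer(peer):
    def capabilities(self):
        return {b'branchmap', b'unbundle=HG10GZ,HG10BZ,HG10UN'}


assert _demopeer().capable(b'branchmap') is True
assert _demopeer().capable(b'unbundle') == b'HG10GZ,HG10BZ,HG10UN'
assert _demopeer().capable(b'getbundle') is False
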
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )

class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

    sidedata = interfaceutil.Attribute(
        """Raw sidedata bytes for the given revision."""
    )

    protocol_flags = interfaceutil.Attribute(
        """Single byte of integer flags that can influence the protocol.

        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
        """
    )

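# Sketch of a hypothetical consumer-side check: ``revision`` and ``delta``
# are mutually exclusive on an irevisiondelta.
def _fulltext_or_delta(rdelta):
    assert (rdelta.revision is None) != (rdelta.delta is None)
    return rdelta.revision if rdelta.revision is not None else rdelta.delta
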
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where the first 6 bytes are the offset and the next 2
           bytes are flags. The offset can be 0 if it is not used by the
           store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of the revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


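# Illustrative sketch only (not part of the interface): unpacking one
# entry of an ``ifilerevisionssequence``. ``index`` is assumed to be any
# object conforming to the interface above; the helper name is
# hypothetical. The bit arithmetic follows the field description: first
# 6 bytes are the offset, the trailing 2 bytes the flags.
def _example_describe_entry(index, rev):
    """Return a dict describing the 8-tuple stored for ``rev``."""
    offset_flags, comp, uncomp, base, link, p1, p2, node = index[rev]
    return {
        b'offset': offset_flags >> 16,  # upper 6 bytes
        b'flags': offset_flags & 0xFFFF,  # lower 2 bytes
        b'compressed size': comp,
        b'uncompressed size': uncomp,
        b'base revision': base,
        b'link revision': link,
        b'p1 revision': p1,
        b'p2 revision': p2,
        b'node': node,
    }

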
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid = interfaceutil.Attribute(
        """node for the null revision for use as delta base."""
    )

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


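# Illustrative sketch only: walking the DAG exposed by an ``ifileindex``
# implementation. ``store`` is assumed to conform to the interface above;
# the helper name is hypothetical.
def _example_walk_dag(store):
    """Yield ``(rev, node, p1rev, p2rev)`` in revision-number order."""
    for rev in store.revs():
        p1rev, p2rev = store.parentrevs(rev)
        yield rev, store.node(rev), p1rev, p2rev

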
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and
        behavior of this mode is storage-dependent.) ``nodes`` ordering can
        force revisions to be emitted before their ancestors, so consumers
        should use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """


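# Illustrative sketch only: consuming ``emitrevisions()``. ``store`` is
# assumed to conform to ``ifiledata``; the emitted objects expose ``node``
# and ``basenode`` attributes per the ``irevisiondelta`` interface defined
# earlier in this module.
def _example_collect_delta_bases(store, nodes):
    """Return ``(node, deltabase)`` pairs, with parents assumed available."""
    pairs = []
    for delta in store.emitrevisions(
        nodes, revisiondata=True, assumehaveparentrevisions=True
    ):
        pairs.append((delta.node, delta.basenode))
    return pairs

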
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the
        list even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


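# Illustrative sketch only: shaping one of the 7-tuples consumed by
# ``addgroup()`` for the special case where the delta base is ``nullid``
# (the delta body then carries the fulltext wrapped in a trivial mpatch
# header). Assumes ``mdiff.trivialdiffheader()`` from ``mercurial.mdiff``;
# the helper name and arguments are hypothetical.
def _example_fulltext_delta(nullid, node, p1, p2, linknode, fulltext):
    """Return a ``deltas`` entry storing ``fulltext`` against nullid."""
    from mercurial import mdiff

    # An mpatch fragment that replaces the empty base with the fulltext.
    delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
    return (node, p1, p2, linknode, nullid, delta, 0)

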
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide
           storage for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


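# Illustrative sketch only: querying ``storageinfo()`` while tolerating
# backends that return ``None`` for unsupported queries. ``fl`` is assumed
# to conform to ``ifilestorage``; bytestring keys mirror the argument
# names (as in the revlog implementation).
def _example_report_sizes(fl):
    """Return ``(trackedsize, storedsize)``, substituting 0 for None."""
    info = fl.storageinfo(trackedsize=True, storedsize=True)
    return info[b'trackedsize'] or 0, info[b'storedsize'] or 0

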
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


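# Illustrative sketch only: the reference-counting behaviour described by
# ``idirs``. ``dirs`` is assumed to be any implementation of the interface
# (``pathutil.dirs`` is one such type); the helper name is hypothetical.
def _example_dirs_roundtrip(dirs):
    """Add then remove a path; a directory lives only while referenced."""
    dirs.addpath(b'a/b/c.txt')
    assert b'a/b' in dirs
    dirs.delpath(b'a/b/c.txt')  # last path under a/b, so a/b is dropped
    assert b'a/b' not in dirs

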
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples
        of the form ``((node1, flag1), (node2, flag2))`` where
        ``(node1, flag1)`` represents the node and flags for this manifest
        and ``(node2, flag2)`` are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation of another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


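# Illustrative sketch only: interpreting the nested-tuple structure
# returned by ``imanifestdict.diff()``. ``m1`` and ``m2`` are assumed to
# conform to the interface above.
def _example_changed_paths(m1, m2):
    """Return the paths whose node differs between two manifests."""
    changed = []
    for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
        if node1 != node2:
            changed.append(path)
    return changed

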
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


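# Illustrative sketch only: listing paths touched relative to the 1st
# parent via ``readdelta()``. ``mctx`` is assumed to conform to
# ``imanifestrevisionstored``.
def _example_paths_changed_from_p1(mctx):
    """Return the paths present in the delta against the 1st parent."""
    return list(mctx.readdelta().iterkeys())

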
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


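# Illustrative sketch only: the copy-mutate-write cycle implied by the two
# manifest revision interfaces above. ``mctx`` is assumed to conform to
# ``imanifestrevisionstored``; the remaining arguments are assumed inputs.
def _example_record_file(mctx, tr, linkrev, p1node, p2node, path, filenode):
    """Commit a new manifest revision mapping ``path`` to ``filenode``."""
    writable = mctx.copy()  # an imanifestrevisionwritable
    writable.read()[path] = filenode  # imanifestdict supports __setitem__
    return writable.write(tr, linkrev, p1node, p2node, [path], [])

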
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not
        all paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


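# Illustrative sketch only: the two-step strip protocol shared by
# ``ifilemutation`` and ``imanifeststorage``. ``store`` and ``tr`` are
# assumed inputs; the helper name is hypothetical.
def _example_strip(store, minlink, tr):
    """Report what a strip at ``minlink`` would break, then perform it."""
    striprev, brokenrevs = store.getstrippoint(minlink)
    store.strip(minlink, tr)
    return striprev, brokenrevs

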
1409 class imanifestlog(interfaceutil.Interface):
1403 class imanifestlog(interfaceutil.Interface):
1410 """Interface representing a collection of manifest snapshots.
1404 """Interface representing a collection of manifest snapshots.
1411
1405
1412 Represents the root manifest in a repository.
1406 Represents the root manifest in a repository.
1413
1407
1414 Also serves as a means to access nested tree manifests and to cache
1408 Also serves as a means to access nested tree manifests and to cache
1415 tree manifests.
1409 tree manifests.
1416 """
1410 """
1417
1411
1418 nodeconstants = interfaceutil.Attribute(
1412 nodeconstants = interfaceutil.Attribute(
1419 """nodeconstants used by the current repository."""
1413 """nodeconstants used by the current repository."""
1420 )
1414 )
1421
1415
1422 def __getitem__(node):
1416 def __getitem__(node):
1423 """Obtain a manifest instance for a given binary node.
1417 """Obtain a manifest instance for a given binary node.
1424
1418
1425 Equivalent to calling ``self.get('', node)``.
1419 Equivalent to calling ``self.get('', node)``.
1426
1420
1427 The returned object conforms to the ``imanifestrevisionstored``
1421 The returned object conforms to the ``imanifestrevisionstored``
1428 interface.
1422 interface.
1429 """
1423 """
1430
1424
1431 def get(tree, node, verify=True):
1425 def get(tree, node, verify=True):
1432 """Retrieve the manifest instance for a given directory and binary node.
1426 """Retrieve the manifest instance for a given directory and binary node.
1433
1427
1434 ``node`` always refers to the node of the root manifest (which will be
1428 ``node`` always refers to the node of the root manifest (which will be
1435 the only manifest if flat manifests are being used).
1429 the only manifest if flat manifests are being used).
1436
1430
1437 If ``tree`` is the empty string, the root manifest is returned.
1431 If ``tree`` is the empty string, the root manifest is returned.
1438 Otherwise the manifest for the specified directory will be returned
1432 Otherwise the manifest for the specified directory will be returned
1439 (requires tree manifests).
1433 (requires tree manifests).
1440
1434
1441 If ``verify`` is True, ``LookupError`` is raised if the node is not
1435 If ``verify`` is True, ``LookupError`` is raised if the node is not
1442 known.
1436 known.
1443
1437
1444 The returned object conforms to the ``imanifestrevisionstored``
1438 The returned object conforms to the ``imanifestrevisionstored``
1445 interface.
1439 interface.
1446 """
1440 """
1447
1441
1448 def getstorage(tree):
1442 def getstorage(tree):
1449 """Retrieve an interface to storage for a particular tree.
1443 """Retrieve an interface to storage for a particular tree.
1450
1444
1451 If ``tree`` is the empty bytestring, storage for the root manifest will
1445 If ``tree`` is the empty bytestring, storage for the root manifest will
1452 be returned. Otherwise storage for a tree manifest is returned.
1446 be returned. Otherwise storage for a tree manifest is returned.
1453
1447
1454 TODO formalize interface for returned object.
1448 TODO formalize interface for returned object.
1455 """
1449 """
1456
1450
1457 def clearcaches():
1451 def clearcaches():
1458 """Clear caches associated with this collection."""
1452 """Clear caches associated with this collection."""
1459
1453
1460 def rev(node):
1454 def rev(node):
1461 """Obtain the revision number for a binary node.
1455 """Obtain the revision number for a binary node.
1462
1456
1463 Raises ``error.LookupError`` if the node is not known.
1457 Raises ``error.LookupError`` if the node is not known.
1464 """
1458 """
1465
1459
1466 def update_caches(transaction):
1460 def update_caches(transaction):
1467 """update whatever cache are relevant for the used storage."""
1461 """update whatever cache are relevant for the used storage."""
1468
1462
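# Illustrative usage sketch (not part of the interface definition): how a
# caller might exercise an ``imanifestlog`` implementation. ``repo`` and
# ``node`` are hypothetical stand-ins for a local repository and a root
# manifest node.
#
#     mfl = repo.manifestlog
#     rootmf = mfl[node]                  # equivalent to mfl.get(b'', node)
#     storage = mfl.getstorage(b'')       # storage for the root manifest
#     rev = mfl.rev(node)                 # raises error.LookupError if unknown
#     if b'treemanifest' in repo.requirements:
#         subdir = mfl.get(b'dir/subdir', node)  # nested tree manifest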
1469
1463
1470 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1464 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1471 """Local repository sub-interface providing access to tracked file storage.
1465 """Local repository sub-interface providing access to tracked file storage.
1472
1466
1473 This interface defines how a repository accesses storage for a single
1467 This interface defines how a repository accesses storage for a single
1474 tracked file path.
1468 tracked file path.
1475 """
1469 """
1476
1470
1477 def file(f):
1471 def file(f):
1478 """Obtain a filelog for a tracked path.
1472 """Obtain a filelog for a tracked path.
1479
1473
1480 The returned type conforms to the ``ifilestorage`` interface.
1474 The returned type conforms to the ``ifilestorage`` interface.
1481 """
1475 """
1482
1476
1483
1477
1484 class ilocalrepositorymain(interfaceutil.Interface):
1478 class ilocalrepositorymain(interfaceutil.Interface):
1485 """Main interface for local repositories.
1479 """Main interface for local repositories.
1486
1480
1487 This currently captures the reality of things - not how things should be.
1481 This currently captures the reality of things - not how things should be.
1488 """
1482 """
1489
1483
1490 nodeconstants = interfaceutil.Attribute(
1484 nodeconstants = interfaceutil.Attribute(
1491 """Constant nodes matching the hash function used by the repository."""
1485 """Constant nodes matching the hash function used by the repository."""
1492 )
1486 )
1493 nullid = interfaceutil.Attribute(
1487 nullid = interfaceutil.Attribute(
1494 """null revision for the hash function used by the repository."""
1488 """null revision for the hash function used by the repository."""
1495 )
1489 )
1496
1490
1497 supported = interfaceutil.Attribute(
1491 supported = interfaceutil.Attribute(
1498 """Set of requirements that this repo is capable of opening."""
1492 """Set of requirements that this repo is capable of opening."""
1499 )
1493 )
1500
1494
1501 requirements = interfaceutil.Attribute(
1495 requirements = interfaceutil.Attribute(
1502 """Set of requirements this repo uses."""
1496 """Set of requirements this repo uses."""
1503 )
1497 )
1504
1498
1505 features = interfaceutil.Attribute(
1499 features = interfaceutil.Attribute(
1506 """Set of "features" this repository supports.
1500 """Set of "features" this repository supports.
1507
1501
1508 A "feature" is a loosely-defined term. It can refer to a feature
1502 A "feature" is a loosely-defined term. It can refer to a feature
1509 in the classical sense or can describe an implementation detail
1503 in the classical sense or can describe an implementation detail
1510 of the repository. For example, a ``readonly`` feature may denote
1504 of the repository. For example, a ``readonly`` feature may denote
1511 the repository as read-only. Or a ``revlogfilestore`` feature may
1505 the repository as read-only. Or a ``revlogfilestore`` feature may
1512 denote that the repository is using revlogs for file storage.
1506 denote that the repository is using revlogs for file storage.
1513
1507
1514 The intent of features is to provide a machine-queryable mechanism
1508 The intent of features is to provide a machine-queryable mechanism
1515 for repo consumers to test for various repository characteristics.
1509 for repo consumers to test for various repository characteristics.
1516
1510
1517 Features are similar to ``requirements``. The main difference is that
1511 Features are similar to ``requirements``. The main difference is that
1518 requirements are stored on-disk and represent requirements to open the
1512 requirements are stored on-disk and represent requirements to open the
1519 repository. Features describe run-time capabilities of the repository
1513 repository. Features describe run-time capabilities of the repository
1520 and are more granular (and may be derived from requirements).
1514 and are more granular (and may be derived from requirements).
1521 """
1515 """
1522 )
1516 )
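
# A minimal sketch of how consumers can test features; a feature check is a
# plain set-membership test. The feature name below is illustrative (see the
# ``REPO_FEATURE_*`` constants for canonical values):
#
#     if b'revlogfilestorage' in repo.features:
#         pass  # file data is stored in revlogs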
1523
1517
1524 filtername = interfaceutil.Attribute(
1518 filtername = interfaceutil.Attribute(
1525 """Name of the repoview that is active on this repo."""
1519 """Name of the repoview that is active on this repo."""
1526 )
1520 )
1527
1521
1528 wvfs = interfaceutil.Attribute(
1522 wvfs = interfaceutil.Attribute(
1529 """VFS used to access the working directory."""
1523 """VFS used to access the working directory."""
1530 )
1524 )
1531
1525
1532 vfs = interfaceutil.Attribute(
1526 vfs = interfaceutil.Attribute(
1533 """VFS rooted at the .hg directory.
1527 """VFS rooted at the .hg directory.
1534
1528
1535 Used to access repository data not in the store.
1529 Used to access repository data not in the store.
1536 """
1530 """
1537 )
1531 )
1538
1532
1539 svfs = interfaceutil.Attribute(
1533 svfs = interfaceutil.Attribute(
1540 """VFS rooted at the store.
1534 """VFS rooted at the store.
1541
1535
1542 Used to access repository data in the store. Typically .hg/store.
1536 Used to access repository data in the store. Typically .hg/store.
1543 But can point elsewhere if the store is shared.
1537 But can point elsewhere if the store is shared.
1544 """
1538 """
1545 )
1539 )
1546
1540
1547 root = interfaceutil.Attribute(
1541 root = interfaceutil.Attribute(
1548 """Path to the root of the working directory."""
1542 """Path to the root of the working directory."""
1549 )
1543 )
1550
1544
1551 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1545 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1552
1546
1553 origroot = interfaceutil.Attribute(
1547 origroot = interfaceutil.Attribute(
1554 """The filesystem path that was used to construct the repo."""
1548 """The filesystem path that was used to construct the repo."""
1555 )
1549 )
1556
1550
1557 auditor = interfaceutil.Attribute(
1551 auditor = interfaceutil.Attribute(
1558 """A pathauditor for the working directory.
1552 """A pathauditor for the working directory.
1559
1553
1560 This checks if a path refers to a nested repository.
1554 This checks if a path refers to a nested repository.
1561
1555
1562 Operates on the filesystem.
1556 Operates on the filesystem.
1563 """
1557 """
1564 )
1558 )
1565
1559
1566 nofsauditor = interfaceutil.Attribute(
1560 nofsauditor = interfaceutil.Attribute(
1567 """A pathauditor for the working directory.
1561 """A pathauditor for the working directory.
1568
1562
1569 This is like ``auditor`` except it doesn't do filesystem checks.
1563 This is like ``auditor`` except it doesn't do filesystem checks.
1570 """
1564 """
1571 )
1565 )
1572
1566
1573 baseui = interfaceutil.Attribute(
1567 baseui = interfaceutil.Attribute(
1574 """Original ui instance passed into constructor."""
1568 """Original ui instance passed into constructor."""
1575 )
1569 )
1576
1570
1577 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1571 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1578
1572
1579 sharedpath = interfaceutil.Attribute(
1573 sharedpath = interfaceutil.Attribute(
1580 """Path to the .hg directory of the repo this repo was shared from."""
1574 """Path to the .hg directory of the repo this repo was shared from."""
1581 )
1575 )
1582
1576
1583 store = interfaceutil.Attribute("""A store instance.""")
1577 store = interfaceutil.Attribute("""A store instance.""")
1584
1578
1585 spath = interfaceutil.Attribute("""Path to the store.""")
1579 spath = interfaceutil.Attribute("""Path to the store.""")
1586
1580
1587 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1581 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1588
1582
1589 cachevfs = interfaceutil.Attribute(
1583 cachevfs = interfaceutil.Attribute(
1590 """A VFS used to access the cache directory.
1584 """A VFS used to access the cache directory.
1591
1585
1592 Typically .hg/cache.
1586 Typically .hg/cache.
1593 """
1587 """
1594 )
1588 )
1595
1589
1596 wcachevfs = interfaceutil.Attribute(
1590 wcachevfs = interfaceutil.Attribute(
1597 """A VFS used to access the cache directory dedicated to working copy
1591 """A VFS used to access the cache directory dedicated to working copy
1598
1592
1599 Typically .hg/wcache.
1593 Typically .hg/wcache.
1600 """
1594 """
1601 )
1595 )
1602
1596
1603 filteredrevcache = interfaceutil.Attribute(
1597 filteredrevcache = interfaceutil.Attribute(
1604 """Holds sets of revisions to be filtered."""
1598 """Holds sets of revisions to be filtered."""
1605 )
1599 )
1606
1600
1607 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1601 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1608
1602
1609 filecopiesmode = interfaceutil.Attribute(
1603 filecopiesmode = interfaceutil.Attribute(
1610 """The way files copies should be dealt with in this repo."""
1604 """The way files copies should be dealt with in this repo."""
1611 )
1605 )
1612
1606
1613 def close():
1607 def close():
1614 """Close the handle on this repository."""
1608 """Close the handle on this repository."""
1615
1609
1616 def peer():
1610 def peer():
1617 """Obtain an object conforming to the ``peer`` interface."""
1611 """Obtain an object conforming to the ``peer`` interface."""
1618
1612
1619 def unfiltered():
1613 def unfiltered():
1620 """Obtain an unfiltered/raw view of this repo."""
1614 """Obtain an unfiltered/raw view of this repo."""
1621
1615
1622 def filtered(name, visibilityexceptions=None):
1616 def filtered(name, visibilityexceptions=None):
1623 """Obtain a named view of this repository."""
1617 """Obtain a named view of this repository."""
1624
1618
1625 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1619 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1626
1620
1627 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1621 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1628
1622
1629 manifestlog = interfaceutil.Attribute(
1623 manifestlog = interfaceutil.Attribute(
1630 """An instance conforming to the ``imanifestlog`` interface.
1624 """An instance conforming to the ``imanifestlog`` interface.
1631
1625
1632 Provides access to manifests for the repository.
1626 Provides access to manifests for the repository.
1633 """
1627 """
1634 )
1628 )
1635
1629
1636 dirstate = interfaceutil.Attribute("""Working directory state.""")
1630 dirstate = interfaceutil.Attribute("""Working directory state.""")
1637
1631
1638 narrowpats = interfaceutil.Attribute(
1632 narrowpats = interfaceutil.Attribute(
1639 """Matcher patterns for this repository's narrowspec."""
1633 """Matcher patterns for this repository's narrowspec."""
1640 )
1634 )
1641
1635
1642 def narrowmatch(match=None, includeexact=False):
1636 def narrowmatch(match=None, includeexact=False):
1643 """Obtain a matcher for the narrowspec."""
1637 """Obtain a matcher for the narrowspec."""
1644
1638
1645 def setnarrowpats(newincludes, newexcludes):
1639 def setnarrowpats(newincludes, newexcludes):
1646 """Define the narrowspec for this repository."""
1640 """Define the narrowspec for this repository."""
1647
1641
1648 def __getitem__(changeid):
1642 def __getitem__(changeid):
1649 """Try to resolve a changectx."""
1643 """Try to resolve a changectx."""
1650
1644
1651 def __contains__(changeid):
1645 def __contains__(changeid):
1652 """Whether a changeset exists."""
1646 """Whether a changeset exists."""
1653
1647
1654 def __nonzero__():
1648 def __nonzero__():
1655 """Always returns True."""
1649 """Always returns True."""
1656 return True
1650 return True
1657
1651
1658 __bool__ = __nonzero__
1652 __bool__ = __nonzero__
1659
1653
1660 def __len__():
1654 def __len__():
1661 """Returns the number of changesets in the repo."""
1655 """Returns the number of changesets in the repo."""
1662
1656
1663 def __iter__():
1657 def __iter__():
1664 """Iterate over revisions in the changelog."""
1658 """Iterate over revisions in the changelog."""
1665
1659
1666 def revs(expr, *args):
1660 def revs(expr, *args):
1667 """Evaluate a revset.
1661 """Evaluate a revset.
1668
1662
1669 Emits revisions.
1663 Emits revisions.
1670 """
1664 """
1671
1665
1672 def set(expr, *args):
1666 def set(expr, *args):
1673 """Evaluate a revset.
1667 """Evaluate a revset.
1674
1668
1675 Emits changectx instances.
1669 Emits changectx instances.
1676 """
1670 """
1677
1671
1678 def anyrevs(specs, user=False, localalias=None):
1672 def anyrevs(specs, user=False, localalias=None):
1679 """Find revisions matching one of the given revsets."""
1673 """Find revisions matching one of the given revsets."""
1680
1674
1681 def url():
1675 def url():
1682 """Returns a string representing the location of this repo."""
1676 """Returns a string representing the location of this repo."""
1683
1677
1684 def hook(name, throw=False, **args):
1678 def hook(name, throw=False, **args):
1685 """Call a hook."""
1679 """Call a hook."""
1686
1680
1687 def tags():
1681 def tags():
1688 """Return a mapping of tag to node."""
1682 """Return a mapping of tag to node."""
1689
1683
1690 def tagtype(tagname):
1684 def tagtype(tagname):
1691 """Return the type of a given tag."""
1685 """Return the type of a given tag."""
1692
1686
1693 def tagslist():
1687 def tagslist():
1694 """Return a list of tags ordered by revision."""
1688 """Return a list of tags ordered by revision."""
1695
1689
1696 def nodetags(node):
1690 def nodetags(node):
1697 """Return the tags associated with a node."""
1691 """Return the tags associated with a node."""
1698
1692
1699 def nodebookmarks(node):
1693 def nodebookmarks(node):
1700 """Return the list of bookmarks pointing to the specified node."""
1694 """Return the list of bookmarks pointing to the specified node."""
1701
1695
1702 def branchmap():
1696 def branchmap():
1703 """Return a mapping of branch to heads in that branch."""
1697 """Return a mapping of branch to heads in that branch."""
1704
1698
1705 def revbranchcache():
1699 def revbranchcache():
1706 pass
1700 pass
1707
1701
1708 def register_changeset(rev, changelogrevision):
1702 def register_changeset(rev, changelogrevision):
1709 """Extension point for caches for new nodes.
1703 """Extension point for caches for new nodes.
1710
1704
1711 Multiple consumers are expected to need parts of the changelogrevision,
1705 Multiple consumers are expected to need parts of the changelogrevision,
1712 so it is provided as an optimization to avoid duplicate lookups. A simple
1706 so it is provided as an optimization to avoid duplicate lookups. A simple
1713 cache would be fragile when other revisions are accessed, too."""
1707 cache would be fragile when other revisions are accessed, too."""
1714 pass
1708 pass
1715
1709
1716 def branchtip(branchtip, ignoremissing=False):
1710 def branchtip(branchtip, ignoremissing=False):
1717 """Return the tip node for a given branch."""
1711 """Return the tip node for a given branch."""
1718
1712
1719 def lookup(key):
1713 def lookup(key):
1720 """Resolve the node for a revision."""
1714 """Resolve the node for a revision."""
1721
1715
1722 def lookupbranch(key):
1716 def lookupbranch(key):
1723 """Look up the branch name of the given revision or branch name."""
1717 """Look up the branch name of the given revision or branch name."""
1724
1718
1725 def known(nodes):
1719 def known(nodes):
1726 """Determine whether a series of nodes is known.
1720 """Determine whether a series of nodes is known.
1727
1721
1728 Returns a list of bools.
1722 Returns a list of bools.
1729 """
1723 """
1730
1724
1731 def local():
1725 def local():
1732 """Whether the repository is local."""
1726 """Whether the repository is local."""
1733 return True
1727 return True
1734
1728
1735 def publishing():
1729 def publishing():
1736 """Whether the repository is a publishing repository."""
1730 """Whether the repository is a publishing repository."""
1737
1731
1738 def cancopy():
1732 def cancopy():
1739 pass
1733 pass
1740
1734
1741 def shared():
1735 def shared():
1742 """The type of shared repository or None."""
1736 """The type of shared repository or None."""
1743
1737
1744 def wjoin(f, *insidef):
1738 def wjoin(f, *insidef):
1745 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1739 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1746
1740
1747 def setparents(p1, p2):
1741 def setparents(p1, p2):
1748 """Set the parent nodes of the working directory."""
1742 """Set the parent nodes of the working directory."""
1749
1743
1750 def filectx(path, changeid=None, fileid=None):
1744 def filectx(path, changeid=None, fileid=None):
1751 """Obtain a filectx for the given file revision."""
1745 """Obtain a filectx for the given file revision."""
1752
1746
1753 def getcwd():
1747 def getcwd():
1754 """Obtain the current working directory from the dirstate."""
1748 """Obtain the current working directory from the dirstate."""
1755
1749
1756 def pathto(f, cwd=None):
1750 def pathto(f, cwd=None):
1757 """Obtain the relative path to a file."""
1751 """Obtain the relative path to a file."""
1758
1752
1759 def adddatafilter(name, fltr):
1753 def adddatafilter(name, fltr):
1760 pass
1754 pass
1761
1755
1762 def wread(filename):
1756 def wread(filename):
1763 """Read a file from wvfs, using data filters."""
1757 """Read a file from wvfs, using data filters."""
1764
1758
1765 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1759 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1766 """Write data to a file in the wvfs, using data filters."""
1760 """Write data to a file in the wvfs, using data filters."""
1767
1761
1768 def wwritedata(filename, data):
1762 def wwritedata(filename, data):
1769 """Resolve data for writing to the wvfs, using data filters."""
1763 """Resolve data for writing to the wvfs, using data filters."""
1770
1764
1771 def currenttransaction():
1765 def currenttransaction():
1772 """Obtain the current transaction instance or None."""
1766 """Obtain the current transaction instance or None."""
1773
1767
1774 def transaction(desc, report=None):
1768 def transaction(desc, report=None):
1775 """Open a new transaction to write to the repository."""
1769 """Open a new transaction to write to the repository."""
1776
1770
1777 def undofiles():
1771 def undofiles():
1778 """Returns a list of (vfs, path) for files to undo transactions."""
1772 """Returns a list of (vfs, path) for files to undo transactions."""
1779
1773
1780 def recover():
1774 def recover():
1781 """Roll back an interrupted transaction."""
1775 """Roll back an interrupted transaction."""
1782
1776
1783 def rollback(dryrun=False, force=False):
1777 def rollback(dryrun=False, force=False):
1784 """Undo the last transaction.
1778 """Undo the last transaction.
1785
1779
1786 DANGEROUS.
1780 DANGEROUS.
1787 """
1781 """
1788
1782
1789 def updatecaches(tr=None, full=False, caches=None):
1783 def updatecaches(tr=None, full=False, caches=None):
1790 """Warm repo caches."""
1784 """Warm repo caches."""
1791
1785
1792 def invalidatecaches():
1786 def invalidatecaches():
1793 """Invalidate cached data due to the repository mutating."""
1787 """Invalidate cached data due to the repository mutating."""
1794
1788
1795 def invalidatevolatilesets():
1789 def invalidatevolatilesets():
1796 pass
1790 pass
1797
1791
1798 def invalidatedirstate():
1792 def invalidatedirstate():
1799 """Invalidate the dirstate."""
1793 """Invalidate the dirstate."""
1800
1794
1801 def invalidate(clearfilecache=False):
1795 def invalidate(clearfilecache=False):
1802 pass
1796 pass
1803
1797
1804 def invalidateall():
1798 def invalidateall():
1805 pass
1799 pass
1806
1800
1807 def lock(wait=True):
1801 def lock(wait=True):
1808 """Lock the repository store and return a lock instance."""
1802 """Lock the repository store and return a lock instance."""
1809
1803
1810 def wlock(wait=True):
1804 def wlock(wait=True):
1811 """Lock the non-store parts of the repository."""
1805 """Lock the non-store parts of the repository."""
1812
1806
1813 def currentwlock():
1807 def currentwlock():
1814 """Return the wlock if it's held or None."""
1808 """Return the wlock if it's held or None."""
1815
1809
1816 def checkcommitpatterns(wctx, match, status, fail):
1810 def checkcommitpatterns(wctx, match, status, fail):
1817 pass
1811 pass
1818
1812
1819 def commit(
1813 def commit(
1820 text=b'',
1814 text=b'',
1821 user=None,
1815 user=None,
1822 date=None,
1816 date=None,
1823 match=None,
1817 match=None,
1824 force=False,
1818 force=False,
1825 editor=False,
1819 editor=False,
1826 extra=None,
1820 extra=None,
1827 ):
1821 ):
1828 """Add a new revision to the repository."""
1822 """Add a new revision to the repository."""
1829
1823
1830 def commitctx(ctx, error=False, origctx=None):
1824 def commitctx(ctx, error=False, origctx=None):
1831 """Commit a commitctx instance to the repository."""
1825 """Commit a commitctx instance to the repository."""
1832
1826
1833 def destroying():
1827 def destroying():
1834 """Inform the repository that nodes are about to be destroyed."""
1828 """Inform the repository that nodes are about to be destroyed."""
1835
1829
1836 def destroyed():
1830 def destroyed():
1837 """Inform the repository that nodes have been destroyed."""
1831 """Inform the repository that nodes have been destroyed."""
1838
1832
1839 def status(
1833 def status(
1840 node1=b'.',
1834 node1=b'.',
1841 node2=None,
1835 node2=None,
1842 match=None,
1836 match=None,
1843 ignored=False,
1837 ignored=False,
1844 clean=False,
1838 clean=False,
1845 unknown=False,
1839 unknown=False,
1846 listsubrepos=False,
1840 listsubrepos=False,
1847 ):
1841 ):
1848 """Convenience method to call repo[x].status()."""
1842 """Convenience method to call repo[x].status()."""
1849
1843
1850 def addpostdsstatus(ps):
1844 def addpostdsstatus(ps):
1851 pass
1845 pass
1852
1846
1853 def postdsstatus():
1847 def postdsstatus():
1854 pass
1848 pass
1855
1849
1856 def clearpostdsstatus():
1850 def clearpostdsstatus():
1857 pass
1851 pass
1858
1852
1859 def heads(start=None):
1853 def heads(start=None):
1860 """Obtain list of nodes that are DAG heads."""
1854 """Obtain list of nodes that are DAG heads."""
1861
1855
1862 def branchheads(branch=None, start=None, closed=False):
1856 def branchheads(branch=None, start=None, closed=False):
1863 pass
1857 pass
1864
1858
1865 def branches(nodes):
1859 def branches(nodes):
1866 pass
1860 pass
1867
1861
1868 def between(pairs):
1862 def between(pairs):
1869 pass
1863 pass
1870
1864
1871 def checkpush(pushop):
1865 def checkpush(pushop):
1872 pass
1866 pass
1873
1867
1874 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1868 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1875
1869
1876 def pushkey(namespace, key, old, new):
1870 def pushkey(namespace, key, old, new):
1877 pass
1871 pass
1878
1872
1879 def listkeys(namespace):
1873 def listkeys(namespace):
1880 pass
1874 pass
1881
1875
1882 def debugwireargs(one, two, three=None, four=None, five=None):
1876 def debugwireargs(one, two, three=None, four=None, five=None):
1883 pass
1877 pass
1884
1878
1885 def savecommitmessage(text):
1879 def savecommitmessage(text):
1886 pass
1880 pass
1887
1881
1888 def register_sidedata_computer(
1882 def register_sidedata_computer(
1889 kind, category, keys, computer, flags, replace=False
1883 kind, category, keys, computer, flags, replace=False
1890 ):
1884 ):
1891 pass
1885 pass
1892
1886
1893 def register_wanted_sidedata(category):
1887 def register_wanted_sidedata(category):
1894 pass
1888 pass
1895
1889
1896
1890
1897 class completelocalrepository(
1891 class completelocalrepository(
1898 ilocalrepositorymain, ilocalrepositoryfilestorage
1892 ilocalrepositorymain, ilocalrepositoryfilestorage
1899 ):
1893 ):
1900 """Complete interface for a local repository."""
1894 """Complete interface for a local repository."""
1901
1895
1902
1896
1903 class iwireprotocolcommandcacher(interfaceutil.Interface):
1897 class iwireprotocolcommandcacher(interfaceutil.Interface):
1904 """Represents a caching backend for wire protocol commands.
1898 """Represents a caching backend for wire protocol commands.
1905
1899
1906 Wire protocol version 2 supports transparent caching of many commands.
1900 Wire protocol version 2 supports transparent caching of many commands.
1907 To leverage this caching, servers can activate objects that cache
1901 To leverage this caching, servers can activate objects that cache
1908 command responses. Objects handle both cache writing and reading.
1902 command responses. Objects handle both cache writing and reading.
1909 This interface defines how that response caching mechanism works.
1903 This interface defines how that response caching mechanism works.
1910
1904
1911 Wire protocol version 2 commands emit a series of objects that are
1905 Wire protocol version 2 commands emit a series of objects that are
1912 serialized and sent to the client. The caching layer exists between
1906 serialized and sent to the client. The caching layer exists between
1913 the invocation of the command function and the sending of its output
1907 the invocation of the command function and the sending of its output
1914 objects to an output layer.
1908 objects to an output layer.
1915
1909
1916 Instances of this interface represent a binding to a cache that
1910 Instances of this interface represent a binding to a cache that
1917 can serve a response (in place of calling a command function) and/or
1911 can serve a response (in place of calling a command function) and/or
1918 write responses to a cache for subsequent use.
1912 write responses to a cache for subsequent use.
1919
1913
1920 When a command request arrives, the following happens with regards
1914 When a command request arrives, the following happens with regards
1921 to this interface:
1915 to this interface:
1922
1916
1923 1. The server determines whether the command request is cacheable.
1917 1. The server determines whether the command request is cacheable.
1924 2. If it is, an instance of this interface is spawned.
1918 2. If it is, an instance of this interface is spawned.
1925 3. The cacher is activated in a context manager (``__enter__`` is called).
1919 3. The cacher is activated in a context manager (``__enter__`` is called).
1926 4. A cache *key* for that request is derived. This will call the
1920 4. A cache *key* for that request is derived. This will call the
1927 instance's ``adjustcachekeystate()`` method so the derivation
1921 instance's ``adjustcachekeystate()`` method so the derivation
1928 can be influenced.
1922 can be influenced.
1929 5. The cacher is informed of the derived cache key via a call to
1923 5. The cacher is informed of the derived cache key via a call to
1930 ``setcachekey()``.
1924 ``setcachekey()``.
1931 6. The cacher's ``lookup()`` method is called to test for presence of
1925 6. The cacher's ``lookup()`` method is called to test for presence of
1932 the derived key in the cache.
1926 the derived key in the cache.
1933 7. If ``lookup()`` returns a hit, that cached result is used in place
1927 7. If ``lookup()`` returns a hit, that cached result is used in place
1934 of invoking the command function. ``__exit__`` is called and the instance
1928 of invoking the command function. ``__exit__`` is called and the instance
1935 is discarded.
1929 is discarded.
1936 8. The command function is invoked.
1930 8. The command function is invoked.
1937 9. ``onobject()`` is called for each object emitted by the command
1931 9. ``onobject()`` is called for each object emitted by the command
1938 function.
1932 function.
1939 10. After the final object is seen, ``onfinished()`` is called.
1933 10. After the final object is seen, ``onfinished()`` is called.
1940 11. ``__exit__`` is called to signal the end of use of the instance.
1934 11. ``__exit__`` is called to signal the end of use of the instance.
1941
1935
1942 Cache *key* derivation can be influenced by the instance.
1936 Cache *key* derivation can be influenced by the instance.
1943
1937
1944 Cache keys are initially derived from a deterministic representation of
1938 Cache keys are initially derived from a deterministic representation of
1945 the command request. This includes the command name, arguments, protocol
1939 the command request. This includes the command name, arguments, protocol
1946 version, etc. This initial key derivation is performed by CBOR-encoding a
1940 version, etc. This initial key derivation is performed by CBOR-encoding a
1947 data structure and feeding that output into a hasher.
1941 data structure and feeding that output into a hasher.
1948
1942
1949 Instances of this interface can influence this initial key derivation
1943 Instances of this interface can influence this initial key derivation
1950 via ``adjustcachekeystate()``.
1944 via ``adjustcachekeystate()``.
1951
1945
1952 The instance is informed of the derived cache key via a call to
1946 The instance is informed of the derived cache key via a call to
1953 ``setcachekey()``. The instance must store the key locally so it can
1947 ``setcachekey()``. The instance must store the key locally so it can
1954 be consulted on subsequent operations that may require it.
1948 be consulted on subsequent operations that may require it.
1955
1949
1956 When constructed, the instance has access to a callable that can be used
1950 When constructed, the instance has access to a callable that can be used
1957 for encoding response objects. This callable receives as its single
1951 for encoding response objects. This callable receives as its single
1958 argument an object emitted by a command function. It returns an iterable
1952 argument an object emitted by a command function. It returns an iterable
1959 of bytes chunks representing the encoded object. Unless the cacher is
1953 of bytes chunks representing the encoded object. Unless the cacher is
1960 caching native Python objects in memory or has a way of reconstructing
1954 caching native Python objects in memory or has a way of reconstructing
1961 the original Python objects, implementations typically call this function
1955 the original Python objects, implementations typically call this function
1962 to produce bytes from the output objects and then store those bytes in
1956 to produce bytes from the output objects and then store those bytes in
1963 the cache. When it comes time to re-emit those bytes, they are wrapped
1957 the cache. When it comes time to re-emit those bytes, they are wrapped
1964 in a ``wireprototypes.encodedresponse`` instance to tell the output
1958 in a ``wireprototypes.encodedresponse`` instance to tell the output
1965 layer that they are pre-encoded.
1959 layer that they are pre-encoded.
1966
1960
1967 When receiving the objects emitted by the command function, instances
1961 When receiving the objects emitted by the command function, instances
1968 can choose what to do with those objects. The simplest thing to do is
1962 can choose what to do with those objects. The simplest thing to do is
1969 re-emit the original objects. They will be forwarded to the output
1963 re-emit the original objects. They will be forwarded to the output
1970 layer and will be processed as if the cacher did not exist.
1964 layer and will be processed as if the cacher did not exist.
1971
1965
1972 Implementations could also choose to not emit objects - instead locally
1966 Implementations could also choose to not emit objects - instead locally
1973 buffering objects or their encoded representation. They could then emit
1967 buffering objects or their encoded representation. They could then emit
1974 a single "coalesced" object when ``onfinished()`` is called. In
1968 a single "coalesced" object when ``onfinished()`` is called. In
1975 this way, the implementation would function as a filtering layer of
1969 this way, the implementation would function as a filtering layer of
1976 sorts.
1970 sorts.
1977
1971
1978 When caching objects, typically the encoded form of the object will
1972 When caching objects, typically the encoded form of the object will
1979 be stored. Keep in mind that if the original object is forwarded to
1973 be stored. Keep in mind that if the original object is forwarded to
1980 the output layer, it will need to be encoded there as well. For large
1974 the output layer, it will need to be encoded there as well. For large
1981 output, this redundant encoding could add overhead. Implementations
1975 output, this redundant encoding could add overhead. Implementations
1982 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1976 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1983 instances to avoid this overhead.
1977 instances to avoid this overhead.
1984 """
1978 """
1985
1979
1986 def __enter__():
1980 def __enter__():
1987 """Marks the instance as active.
1981 """Marks the instance as active.
1988
1982
1989 Should return self.
1983 Should return self.
1990 """
1984 """
1991
1985
1992 def __exit__(exctype, excvalue, exctb):
1986 def __exit__(exctype, excvalue, exctb):
1993 """Called when cacher is no longer used.
1987 """Called when cacher is no longer used.
1994
1988
1995 This can be used by implementations to perform cleanup actions (e.g.
1989 This can be used by implementations to perform cleanup actions (e.g.
1996 disconnecting network sockets, aborting a partially cached response).
1990 disconnecting network sockets, aborting a partially cached response).
1997 """
1991 """
1998
1992
1999 def adjustcachekeystate(state):
1993 def adjustcachekeystate(state):
2000 """Influences cache key derivation by adjusting state to derive key.
1994 """Influences cache key derivation by adjusting state to derive key.
2001
1995
2002 A dict defining the state used to derive the cache key is passed.
1996 A dict defining the state used to derive the cache key is passed.
2003
1997
2004 Implementations can modify this dict to record additional state that
1998 Implementations can modify this dict to record additional state that
2005 should influence key derivation.
1999 should influence key derivation.
2006
2000
2007 Implementations are *highly* encouraged to not modify or delete
2001 Implementations are *highly* encouraged to not modify or delete
2008 existing keys.
2002 existing keys.
2009 """
2003 """
2010
2004
2011 def setcachekey(key):
2005 def setcachekey(key):
2012 """Record the derived cache key for this request.
2006 """Record the derived cache key for this request.
2013
2007
2014 Instances may mutate the key for internal usage, as desired. e.g.
2008 Instances may mutate the key for internal usage, as desired. e.g.
2015 instances may wish to prepend the repo name, introduce path
2009 instances may wish to prepend the repo name, introduce path
2016 components for filesystem or URL addressing, etc. Behavior is up to
2010 components for filesystem or URL addressing, etc. Behavior is up to
2017 the cache.
2011 the cache.
2018
2012
2019 Returns a bool indicating if the request is cacheable by this
2013 Returns a bool indicating if the request is cacheable by this
2020 instance.
2014 instance.
2021 """
2015 """
2022
2016
2023 def lookup():
2017 def lookup():
2024 """Attempt to resolve an entry in the cache.
2018 """Attempt to resolve an entry in the cache.
2025
2019
2026 The instance is instructed to look for the cache key that it was
2020 The instance is instructed to look for the cache key that it was
2027 informed about via the call to ``setcachekey()``.
2021 informed about via the call to ``setcachekey()``.
2028
2022
2029 If there's no cache hit or the cacher doesn't wish to use the cached
2023 If there's no cache hit or the cacher doesn't wish to use the cached
2030 entry, ``None`` should be returned.
2024 entry, ``None`` should be returned.
2031
2025
2032 Else, a dict defining the cached result should be returned. The
2026 Else, a dict defining the cached result should be returned. The
2033 dict may have the following keys:
2027 dict may have the following keys:
2034
2028
2035 objs
2029 objs
2036 An iterable of objects that should be sent to the client. That
2030 An iterable of objects that should be sent to the client. That
2037 iterable of objects is expected to be what the command function
2031 iterable of objects is expected to be what the command function
2038 would return if invoked or an equivalent representation thereof.
2032 would return if invoked or an equivalent representation thereof.
2039 """
2033 """
2040
2034
2041 def onobject(obj):
2035 def onobject(obj):
2042 """Called when a new object is emitted from the command function.
2036 """Called when a new object is emitted from the command function.
2043
2037
2044 Receives as its argument the object that was emitted from the
2038 Receives as its argument the object that was emitted from the
2045 command function.
2039 command function.
2046
2040
2047 This method returns an iterator of objects to forward to the output
2041 This method returns an iterator of objects to forward to the output
2048 layer. The easiest implementation is a generator that just
2042 layer. The easiest implementation is a generator that just
2049 ``yield obj``.
2043 ``yield obj``.
2050 """
2044 """
2051
2045
2052 def onfinished():
2046 def onfinished():
2053 """Called after all objects have been emitted from the command function.
2047 """Called after all objects have been emitted from the command function.
2054
2048
2055 Implementations should return an iterator of objects to forward to
2049 Implementations should return an iterator of objects to forward to
2056 the output layer.
2050 the output layer.
2057
2051
2058 This method can be a generator.
2052 This method can be a generator.
2059 """
2053 """
@@ -1,3980 +1,3977 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from concurrent import futures
17 from concurrent import futures
18 from typing import (
18 from typing import (
19 Optional,
19 Optional,
20 )
20 )
21
21
22 from .i18n import _
22 from .i18n import _
23 from .node import (
23 from .node import (
24 bin,
24 bin,
25 hex,
25 hex,
26 nullrev,
26 nullrev,
27 sha1nodeconstants,
27 sha1nodeconstants,
28 short,
28 short,
29 )
29 )
30 from .pycompat import (
30 from .pycompat import (
31 delattr,
31 delattr,
32 getattr,
32 getattr,
33 )
33 )
34 from . import (
34 from . import (
35 bookmarks,
35 bookmarks,
36 branchmap,
36 branchmap,
37 bundle2,
37 bundle2,
38 bundlecaches,
38 bundlecaches,
39 changegroup,
39 changegroup,
40 color,
40 color,
41 commit,
41 commit,
42 context,
42 context,
43 dirstate,
43 dirstate,
44 dirstateguard,
44 dirstateguard,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 pushkey,
61 pushkey,
62 pycompat,
62 pycompat,
63 rcutil,
63 rcutil,
64 repoview,
64 repoview,
65 requirements as requirementsmod,
65 requirements as requirementsmod,
66 revlog,
66 revlog,
67 revset,
67 revset,
68 revsetlang,
68 revsetlang,
69 scmutil,
69 scmutil,
70 sparse,
70 sparse,
71 store as storemod,
71 store as storemod,
72 subrepoutil,
72 subrepoutil,
73 tags as tagsmod,
73 tags as tagsmod,
74 transaction,
74 transaction,
75 txnutil,
75 txnutil,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprototypes,
78 wireprototypes,
79 )
79 )
80
80
81 from .interfaces import (
81 from .interfaces import (
82 repository,
82 repository,
83 util as interfaceutil,
83 util as interfaceutil,
84 )
84 )
85
85
86 from .utils import (
86 from .utils import (
87 hashutil,
87 hashutil,
88 procutil,
88 procutil,
89 stringutil,
89 stringutil,
90 urlutil,
90 urlutil,
91 )
91 )
92
92
93 from .revlogutils import (
93 from .revlogutils import (
94 concurrency_checker as revlogchecker,
94 concurrency_checker as revlogchecker,
95 constants as revlogconst,
95 constants as revlogconst,
96 sidedata as sidedatamod,
96 sidedata as sidedatamod,
97 )
97 )
98
98
99 release = lockmod.release
99 release = lockmod.release
100 urlerr = util.urlerr
100 urlerr = util.urlerr
101 urlreq = util.urlreq
101 urlreq = util.urlreq
102
102
103 # set of (path, vfs-location) tuples. vfs-location is:
103 # set of (path, vfs-location) tuples. vfs-location is:
104 # - 'plain' for vfs relative paths
104 # - 'plain' for vfs relative paths
105 # - '' for svfs relative paths
105 # - '' for svfs relative paths
106 _cachedfiles = set()
106 _cachedfiles = set()
107
107
108
108
109 class _basefilecache(scmutil.filecache):
109 class _basefilecache(scmutil.filecache):
110 """All filecache usage on repo are done for logic that should be unfiltered"""
110 """All filecache usage on repo are done for logic that should be unfiltered"""
111
111
112 def __get__(self, repo, type=None):
112 def __get__(self, repo, type=None):
113 if repo is None:
113 if repo is None:
114 return self
114 return self
115 # proxy to unfiltered __dict__ since filtered repo has no entry
115 # proxy to unfiltered __dict__ since filtered repo has no entry
116 unfi = repo.unfiltered()
116 unfi = repo.unfiltered()
117 try:
117 try:
118 return unfi.__dict__[self.sname]
118 return unfi.__dict__[self.sname]
119 except KeyError:
119 except KeyError:
120 pass
120 pass
121 return super(_basefilecache, self).__get__(unfi, type)
121 return super(_basefilecache, self).__get__(unfi, type)
122
122
123 def set(self, repo, value):
123 def set(self, repo, value):
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
125
125
126
126
127 class repofilecache(_basefilecache):
127 class repofilecache(_basefilecache):
128 """filecache for files in .hg but outside of .hg/store"""
128 """filecache for files in .hg but outside of .hg/store"""
129
129
130 def __init__(self, *paths):
130 def __init__(self, *paths):
131 super(repofilecache, self).__init__(*paths)
131 super(repofilecache, self).__init__(*paths)
132 for path in paths:
132 for path in paths:
133 _cachedfiles.add((path, b'plain'))
133 _cachedfiles.add((path, b'plain'))
134
134
135 def join(self, obj, fname):
135 def join(self, obj, fname):
136 return obj.vfs.join(fname)
136 return obj.vfs.join(fname)
137
137
138
138
139 class storecache(_basefilecache):
139 class storecache(_basefilecache):
140 """filecache for files in the store"""
140 """filecache for files in the store"""
141
141
142 def __init__(self, *paths):
142 def __init__(self, *paths):
143 super(storecache, self).__init__(*paths)
143 super(storecache, self).__init__(*paths)
144 for path in paths:
144 for path in paths:
145 _cachedfiles.add((path, b''))
145 _cachedfiles.add((path, b''))
146
146
147 def join(self, obj, fname):
147 def join(self, obj, fname):
148 return obj.sjoin(fname)
148 return obj.sjoin(fname)
149
149
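# Usage sketch: these descriptors wrap properties on the repository class so
# the value is recomputed only when the backing file changes. The property
# below mirrors real usage elsewhere in this module and is shown purely for
# illustration:
#
#     class localrepository:
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             # re-read .hg/bookmarks only when the file changed on disk
#             return bookmarks.bmstore(self)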
150
150
151 class changelogcache(storecache):
151 class changelogcache(storecache):
152 """filecache for the changelog"""
152 """filecache for the changelog"""
153
153
154 def __init__(self):
154 def __init__(self):
155 super(changelogcache, self).__init__()
155 super(changelogcache, self).__init__()
156 _cachedfiles.add((b'00changelog.i', b''))
156 _cachedfiles.add((b'00changelog.i', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
158
158
159 def tracked_paths(self, obj):
159 def tracked_paths(self, obj):
160 paths = [self.join(obj, b'00changelog.i')]
160 paths = [self.join(obj, b'00changelog.i')]
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 paths.append(self.join(obj, b'00changelog.n'))
162 paths.append(self.join(obj, b'00changelog.n'))
163 return paths
163 return paths
164
164
165
165
166 class manifestlogcache(storecache):
166 class manifestlogcache(storecache):
167 """filecache for the manifestlog"""
167 """filecache for the manifestlog"""
168
168
169 def __init__(self):
169 def __init__(self):
170 super(manifestlogcache, self).__init__()
170 super(manifestlogcache, self).__init__()
171 _cachedfiles.add((b'00manifest.i', b''))
171 _cachedfiles.add((b'00manifest.i', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
173
173
174 def tracked_paths(self, obj):
174 def tracked_paths(self, obj):
175 paths = [self.join(obj, b'00manifest.i')]
175 paths = [self.join(obj, b'00manifest.i')]
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 paths.append(self.join(obj, b'00manifest.n'))
177 paths.append(self.join(obj, b'00manifest.n'))
178 return paths
178 return paths
179
179
180
180
181 class mixedrepostorecache(_basefilecache):
181 class mixedrepostorecache(_basefilecache):
182 """filecache for a mix files in .hg/store and outside"""
182 """filecache for a mix files in .hg/store and outside"""
183
183
184 def __init__(self, *pathsandlocations):
184 def __init__(self, *pathsandlocations):
185 # scmutil.filecache only uses the path for passing back into our
185 # scmutil.filecache only uses the path for passing back into our
186 # join(), so we can safely pass a list of paths and locations
186 # join(), so we can safely pass a list of paths and locations
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
189
189
190 def join(self, obj, fnameandlocation):
190 def join(self, obj, fnameandlocation):
191 fname, location = fnameandlocation
191 fname, location = fnameandlocation
192 if location == b'plain':
192 if location == b'plain':
193 return obj.vfs.join(fname)
193 return obj.vfs.join(fname)
194 else:
194 else:
195 if location != b'':
195 if location != b'':
196 raise error.ProgrammingError(
196 raise error.ProgrammingError(
197 b'unexpected location: %s' % location
197 b'unexpected location: %s' % location
198 )
198 )
199 return obj.sjoin(fname)
199 return obj.sjoin(fname)
200
200
201
201
202 def isfilecached(repo, name):
202 def isfilecached(repo, name):
203 """check if a repo has already cached "name" filecache-ed property
203 """check if a repo has already cached "name" filecache-ed property
204
204
205 This returns (cachedobj-or-None, iscached) tuple.
205 This returns (cachedobj-or-None, iscached) tuple.
206 """
206 """
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 if not cacheentry:
208 if not cacheentry:
209 return None, False
209 return None, False
210 return cacheentry.obj, True
210 return cacheentry.obj, True
211
211
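# Example (the attribute name is illustrative): act on a filecache-ed value
# only if it has already been loaded, without triggering materialization.
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         pass  # safe to inspect obj; no I/O was triggered to obtain it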
212
212
213 class unfilteredpropertycache(util.propertycache):
213 class unfilteredpropertycache(util.propertycache):
214 """propertycache that apply to unfiltered repo only"""
214 """propertycache that apply to unfiltered repo only"""
215
215
216 def __get__(self, repo, type=None):
216 def __get__(self, repo, type=None):
217 unfi = repo.unfiltered()
217 unfi = repo.unfiltered()
218 if unfi is repo:
218 if unfi is repo:
219 return super(unfilteredpropertycache, self).__get__(unfi)
219 return super(unfilteredpropertycache, self).__get__(unfi)
220 return getattr(unfi, self.name)
220 return getattr(unfi, self.name)
221
221
222
222
223 class filteredpropertycache(util.propertycache):
223 class filteredpropertycache(util.propertycache):
224 """propertycache that must take filtering in account"""
224 """propertycache that must take filtering in account"""
225
225
226 def cachevalue(self, obj, value):
226 def cachevalue(self, obj, value):
227 object.__setattr__(obj, self.name, value)
227 object.__setattr__(obj, self.name, value)
228
228
229
229
230 def hasunfilteredcache(repo, name):
230 def hasunfilteredcache(repo, name):
231 """check if a repo has an unfilteredpropertycache value for <name>"""
231 """check if a repo has an unfilteredpropertycache value for <name>"""
232 return name in vars(repo.unfiltered())
232 return name in vars(repo.unfiltered())
233
233
234
234
235 def unfilteredmethod(orig):
235 def unfilteredmethod(orig):
236 """decorate method that always need to be run on unfiltered version"""
236 """decorate method that always need to be run on unfiltered version"""
237
237
238 @functools.wraps(orig)
238 @functools.wraps(orig)
239 def wrapper(repo, *args, **kwargs):
239 def wrapper(repo, *args, **kwargs):
240 return orig(repo.unfiltered(), *args, **kwargs)
240 return orig(repo.unfiltered(), *args, **kwargs)
241
241
242 return wrapper
242 return wrapper
243
243
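# Usage sketch (the method name is borrowed from real usage on the
# repository class):
#
#     class localrepository:
#         @unfilteredmethod
#         def destroyed(self):
#             # runs against the unfiltered repo even when invoked on a
#             # filtered repoview
#             ...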
244
244
245 moderncaps = {
245 moderncaps = {
246 b'lookup',
246 b'lookup',
247 b'branchmap',
247 b'branchmap',
248 b'pushkey',
248 b'pushkey',
249 b'known',
249 b'known',
250 b'getbundle',
250 b'getbundle',
251 b'unbundle',
251 b'unbundle',
252 }
252 }
253 legacycaps = moderncaps.union({b'changegroupsubset'})
253 legacycaps = moderncaps.union({b'changegroupsubset'})
254
254
255
255
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 class localcommandexecutor:
257 class localcommandexecutor:
258 def __init__(self, peer):
258 def __init__(self, peer):
259 self._peer = peer
259 self._peer = peer
260 self._sent = False
260 self._sent = False
261 self._closed = False
261 self._closed = False
262
262
263 def __enter__(self):
263 def __enter__(self):
264 return self
264 return self
265
265
266 def __exit__(self, exctype, excvalue, exctb):
266 def __exit__(self, exctype, excvalue, exctb):
267 self.close()
267 self.close()
268
268
269 def callcommand(self, command, args):
269 def callcommand(self, command, args):
270 if self._sent:
270 if self._sent:
271 raise error.ProgrammingError(
271 raise error.ProgrammingError(
272 b'callcommand() cannot be used after sendcommands()'
272 b'callcommand() cannot be used after sendcommands()'
273 )
273 )
274
274
275 if self._closed:
275 if self._closed:
276 raise error.ProgrammingError(
276 raise error.ProgrammingError(
277 b'callcommand() cannot be used after close()'
277 b'callcommand() cannot be used after close()'
278 )
278 )
279
279
280 # We don't need to support anything fancy. Just call the named
280 # We don't need to support anything fancy. Just call the named
281 # method on the peer and return a resolved future.
281 # method on the peer and return a resolved future.
282 fn = getattr(self._peer, pycompat.sysstr(command))
282 fn = getattr(self._peer, pycompat.sysstr(command))
283
283
284 f = futures.Future()
284 f = futures.Future()
285
285
286 try:
286 try:
287 result = fn(**pycompat.strkwargs(args))
287 result = fn(**pycompat.strkwargs(args))
288 except Exception:
288 except Exception:
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 else:
290 else:
291 f.set_result(result)
291 f.set_result(result)
292
292
293 return f
293 return f
294
294
295 def sendcommands(self):
295 def sendcommands(self):
296 self._sent = True
296 self._sent = True
297
297
298 def close(self):
298 def close(self):
299 self._closed = True
299 self._closed = True
300
300
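# Usage sketch, following the generic command-executor protocol (the
# ``commandexecutor()`` accessor and the ``heads`` command are part of the
# standard peer API; shown here as an assumption for illustration):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#         e.sendcommands()           # a no-op flag flip for this local case
#     heads = f.result()             # already resolved; returns immediately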
301
301
302 @interfaceutil.implementer(repository.ipeercommands)
302 @interfaceutil.implementer(repository.ipeercommands)
303 class localpeer(repository.peer):
303 class localpeer(repository.peer):
304 '''peer for a local repo; reflects only the most recent API'''
304 '''peer for a local repo; reflects only the most recent API'''
305
305
306 def __init__(self, repo, caps=None):
306 def __init__(self, repo, caps=None):
307 super(localpeer, self).__init__()
307 super(localpeer, self).__init__()
308
308
309 if caps is None:
309 if caps is None:
310 caps = moderncaps.copy()
310 caps = moderncaps.copy()
311 self._repo = repo.filtered(b'served')
311 self._repo = repo.filtered(b'served')
312 self.ui = repo.ui
312 self.ui = repo.ui
313
313
314 if repo._wanted_sidedata:
314 if repo._wanted_sidedata:
315 formatted = bundle2.format_remote_wanted_sidedata(repo)
315 formatted = bundle2.format_remote_wanted_sidedata(repo)
316 caps.add(b'exp-wanted-sidedata=' + formatted)
316 caps.add(b'exp-wanted-sidedata=' + formatted)
317
317
318 self._caps = repo._restrictcapabilities(caps)
318 self._caps = repo._restrictcapabilities(caps)
319
319
320 # Begin of _basepeer interface.
320 # Begin of _basepeer interface.
321
321
322 def url(self):
322 def url(self):
323 return self._repo.url()
323 return self._repo.url()
324
324
325 def local(self):
325 def local(self):
326 return self._repo
326 return self._repo
327
327
328 def peer(self):
329 return self
330
    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


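# A minimal usage sketch, assuming an already-opened local repository
# ``repo``: driving a localpeer through the modern command executor
# interface. The function name is illustrative only.


def _example_use_localpeer(repo):
    peer = repo.peer()  # a localpeer wrapping the given local repository
    with peer.commandexecutor() as e:
        # callcommand() returns a future; result() resolves it
        heads = e.callcommand(b'heads', {}).result()
    return heads

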
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


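# A minimal sketch of registering such a function. The requirement string
# and function names below are hypothetical; a real extension would do the
# equivalent of ``localrepo.featuresetupfuncs.add(featuresetup)`` from its
# own module during uisetup().


def _example_featuresetup(ui, features):
    # claim support for a custom requirement so repos carrying it can open
    features.add(b'exp-fancy-storage')


def _example_uisetup(ui):
    featuresetupfuncs.add(_example_featuresetup)

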
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the share)
    requirements is the set of requirements of the current repo (the share)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


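# Illustration, assuming a relative share: ``.hg/sharedpath`` might contain
# ``../main/.hg``, which the helper above resolves against ``.hg/`` into an
# absolute, normalized path when the ``relshared`` requirement is present.


def _example_resolve_relative_sharedpath(hgvfs):
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')  # e.g. b'../main/.hg'
    return util.normpath(hgvfs.join(sharedpath))

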
def _readrequires(vfs, allowmissing):
    """reads the ``requires`` file at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())


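# Illustration: ``.hg/requires`` holds one requirement per line; a modern
# repository might contain something like (exact contents vary by version
# and configuration)::
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
#
# which _readrequires() parses into {b'dotencode', b'fncache', ...}.

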
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; see checkrequirementscompat() for that.
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

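    # For reference, the mismatch policies consulted above come from the
    # ``[share]`` config section; e.g. (illustrative values)::
    #
    #     [share]
    #     safe-mismatch.source-not-safe = downgrade-abort
    #     safe-mismatch.source-safe = upgrade-allow
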
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirement is present, the current repository is a share
    # and the store lives at the path recorded in `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


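# A hedged sketch of the factory wrapping described in the docstring above.
# The extension name, requirement string, and function names are invented
# for illustration; a real extension would install the wrapper from its own
# module.


def _example_wrapped_makemain(orig, **kwargs):
    cls = orig(**kwargs)
    # no-op unless this (hypothetical) extension is loaded for the repo
    if 'fancyext' not in kwargs['extensionmodulenames']:
        return cls
    if b'exp-fancy-storage' not in kwargs['requirements']:
        return cls

    class fancyrepo(cls):
        pass

    return fancyrepo


def _example_install_wrapper(ui):
    from mercurial import localrepo as localrepomod

    extensions.wrapfunction(localrepomod, 'makemain', _example_wrapped_makemain)

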
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a share.
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


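# A hedged sketch of the monkeypatching the docstring above invites. The
# extra config file name (``hgrc-team``) and function names are invented
# for illustration.


def _example_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
    ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-team'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret


def _example_install_loadhgrc(ui):
    from mercurial import localrepo as localrepomod

    extensions.wrapfunction(localrepomod, 'loadhgrc', _example_loadhgrc)

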
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize. Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


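# The requirement-to-store mapping above, summarized (the requirement
# strings shown are the literal values behind the constants):
#
#     {b'store', b'fncache', b'dotencode'} -> fncachestore, dotencode on
#     {b'store', b'fncache'}               -> fncachestore, dotencode off
#     {b'store'}                           -> encodedstore
#     {}                                   -> basicstore (very old repos)


def _example_modern_store(path):
    # a sketch: the combination produced by modern defaults
    reqs = {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
    }
    return makestore(reqs, path, lambda base: vfsmod.vfs(base, cacheaudited=True))

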
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

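    # For example, a requirement of b'revlog-compression-zstd' splits as
    # [b'revlog', b'compression', b'zstd'], selecting the b'zstd' engine.
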
1146 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1143 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1147 if options[b'zlib.level'] is not None:
1144 if options[b'zlib.level'] is not None:
1148 if not (0 <= options[b'zlib.level'] <= 9):
1145 if not (0 <= options[b'zlib.level'] <= 9):
1149 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1146 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1150 raise error.Abort(msg % options[b'zlib.level'])
1147 raise error.Abort(msg % options[b'zlib.level'])
1151 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1148 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1152 if options[b'zstd.level'] is not None:
1149 if options[b'zstd.level'] is not None:
1153 if not (0 <= options[b'zstd.level'] <= 22):
1150 if not (0 <= options[b'zstd.level'] <= 22):
1154 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1151 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1155 raise error.Abort(msg % options[b'zstd.level'])
1152 raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
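

# Illustrative sketch (not part of the original file) of how a caller such as
# ``makelocalrepository()`` can fold the factory functions above into a single
# repository type. The helper name ``_derive_repo_type`` is hypothetical, and
# the real derivation in ``makelocalrepository()`` is more involved.
#
#   def _derive_repo_type(requirements, features):
#       bases = []
#       for iface, factoryfunc in REPO_INTERFACES:
#           # resolve the lambda late so wrapped module-level functions apply
#           typ = factoryfunc()(requirements=requirements, features=features)
#           bases.append(typ)
#       return type('derivedrepo', tuple(bases), {})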


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
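
    # Illustrative sketch (not part of the original file): an extension that
    # maintains its own lock-free state file under .hg/ could register it
    # like this from its setup code; the name b'myext-state' is hypothetical.
    #
    #   from mercurial import localrepo
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext-state')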

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle
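
    # Illustrative example (not part of the original file): callers that want
    # a wire-protocol-style interface to a local repository go through the
    # peer rather than using the repository object directly.
    #
    #   peer = repo.peer()
    #   if peer.capable(b'branchmap'):
    #       heads = peer.heads()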

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
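
    # Illustrative example (not part of the original file): switching to the
    # "served" view (the view typically exposed to remote clients) and back
    # to an unfiltered view.
    #
    #   served = repo.filtered(b'served')
    #   unfi = served.unfiltered()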

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )
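
    # Illustrative sketch (not part of the original file): an extension can
    # wrap the ``_makedirstate()`` extension point documented above; the
    # wrapper name ``wrapdirstate`` is hypothetical.
    #
    #   from mercurial import extensions, localrepo
    #
    #   def wrapdirstate(orig, repo):
    #       ds = orig(repo)
    #       # ... decorate or replace the dirstate object here ...
    #       return ds
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_makedirstate', wrapdirstate
    #   )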

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid
1796 @storecache(narrowspec.FILENAME)
1793 @storecache(narrowspec.FILENAME)
1797 def narrowpats(self):
1794 def narrowpats(self):
1798 """matcher patterns for this repository's narrowspec
1795 """matcher patterns for this repository's narrowspec
1799
1796
1800 A tuple of (includes, excludes).
1797 A tuple of (includes, excludes).
1801 """
1798 """
1802 return narrowspec.load(self)
1799 return narrowspec.load(self)
1803
1800
1804 @storecache(narrowspec.FILENAME)
1801 @storecache(narrowspec.FILENAME)
1805 def _storenarrowmatch(self):
1802 def _storenarrowmatch(self):
1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1803 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1807 return matchmod.always()
1804 return matchmod.always()
1808 include, exclude = self.narrowpats
1805 include, exclude = self.narrowpats
1809 return narrowspec.match(self.root, include=include, exclude=exclude)
1806 return narrowspec.match(self.root, include=include, exclude=exclude)
1810
1807
1811 @storecache(narrowspec.FILENAME)
1808 @storecache(narrowspec.FILENAME)
1812 def _narrowmatch(self):
1809 def _narrowmatch(self):
1813 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1810 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1814 return matchmod.always()
1811 return matchmod.always()
1815 narrowspec.checkworkingcopynarrowspec(self)
1812 narrowspec.checkworkingcopynarrowspec(self)
1816 include, exclude = self.narrowpats
1813 include, exclude = self.narrowpats
1817 return narrowspec.match(self.root, include=include, exclude=exclude)
1814 return narrowspec.match(self.root, include=include, exclude=exclude)
1818
1815
1819 def narrowmatch(self, match=None, includeexact=False):
1816 def narrowmatch(self, match=None, includeexact=False):
1820 """matcher corresponding the the repo's narrowspec
1817 """matcher corresponding the the repo's narrowspec
1821
1818
1822 If `match` is given, then that will be intersected with the narrow
1819 If `match` is given, then that will be intersected with the narrow
1823 matcher.
1820 matcher.
1824
1821
1825 If `includeexact` is True, then any exact matches from `match` will
1822 If `includeexact` is True, then any exact matches from `match` will
1826 be included even if they're outside the narrowspec.
1823 be included even if they're outside the narrowspec.
1827 """
1824 """
1828 if match:
1825 if match:
1829 if includeexact and not self._narrowmatch.always():
1826 if includeexact and not self._narrowmatch.always():
1830 # do not exclude explicitly-specified paths so that they can
1827 # do not exclude explicitly-specified paths so that they can
1831 # be warned later on
1828 # be warned later on
1832 em = matchmod.exact(match.files())
1829 em = matchmod.exact(match.files())
1833 nm = matchmod.unionmatcher([self._narrowmatch, em])
1830 nm = matchmod.unionmatcher([self._narrowmatch, em])
1834 return matchmod.intersectmatchers(match, nm)
1831 return matchmod.intersectmatchers(match, nm)
1835 return matchmod.intersectmatchers(match, self._narrowmatch)
1832 return matchmod.intersectmatchers(match, self._narrowmatch)
1836 return self._narrowmatch
1833 return self._narrowmatch
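
    # Illustrative example (not part of the original file): matchers are
    # callable, so a path's membership in the narrowspec can be tested
    # directly. The path below is a placeholder.
    #
    #   m = repo.narrowmatch()
    #   if m(b'some/dir/file.txt'):
    #       pass  # the file is inside the narrow clone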

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
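
    # Illustrative example (not part of the original file): the indexing
    # protocol implemented above accepts several kinds of changeids.
    #
    #   repo[0]       # changectx for revision number 0
    #   repo[b'tip']  # changectx for the tip changeset
    #   repo[b'.']    # changectx for the working directory's first parent
    #   repo[None]    # workingctx for the working directory itself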

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
2024
2021
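    # A hedged usage sketch (not part of the class): ``set()`` yields
    # changectx objects, so callers can read changeset metadata directly.
    #
    #     for ctx in repo.set(b'draft() and user(%s)', b'alice'):
    #         repo.ui.write(b'%d: %s\n' % (ctx.rev(), ctx.description()))
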
    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

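    # A hedged usage sketch (not part of the class): combining several
    # user-supplied revsets while overriding one alias locally; the alias
    # name and definition here are hypothetical.
    #
    #     revs = repo.anyrevs(
    #         [b'mine()', b'draft()'],
    #         user=True,
    #         localalias={b'mine': b'author(alice)'},
    #     )
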
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

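    # A hedged usage sketch (not part of the class): an extension firing a
    # custom hook; the hook name and keyword argument are hypothetical.
    #
    #     repo.hook(b'myextension-update', throw=False, node=hex(newnode))
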
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

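    # A hedged usage sketch (not part of the class): iterating the branchmap;
    # assumes the returned cache object supports ``items()``.
    #
    #     for branch, heads in repo.branchmap().items():
    #         repo.ui.write(b'%s: %d head(s)\n' % (branch, len(heads)))
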
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

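    # A hedged usage sketch (not part of the class): resolving a branch tip
    # while tolerating a missing branch (None is returned instead of raising).
    #
    #     node = repo.branchtip(b'default', ignoremissing=True)
    #     if node is not None:
    #         repo.ui.write(b'%s\n' % hex(node))
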
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

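    # A hedged configuration sketch (not part of the class): the [encode] and
    # [decode] sections read by _loadfilter() map file patterns to filter
    # commands in an hgrc (example adapted from `hg help config`):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = gzip
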
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

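    # A hedged usage sketch (not part of the class): ``flags`` selects the
    # file type, where b'l' writes a symlink, b'x' sets the executable bit,
    # and b'' writes a plain file.
    #
    #     repo.wwrite(b'script.sh', b'#!/bin/sh\n', b'x')  # executable file
    #     repo.wwrite(b'link-name', b'target-path', b'l')  # symlink
    #     repo.wwrite(b'data.txt', b'hello\n', b'')        # regular file
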
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

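    # A hedged usage sketch (not part of the class): code that may run inside
    # or outside an existing transaction can reuse the current one; the
    # ``do_work`` helper is hypothetical.
    #
    #     tr = repo.currenttransaction()
    #     if tr is not None:
    #         do_work(repo, tr)
    #     else:
    #         with repo.lock(), repo.transaction(b'my-operation') as tr:
    #             do_work(repo, tr)
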
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #     "-R": tag is removed,
        #     "+A": tag is added,
        #     "-M": tag is moved (old value),
        #     "+M": tag is moved (new value),
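        # A hedged parsing sketch (not part of this method): a txnclose hook
        # could consume the line-based format above roughly like this, with
        # ``changes_data`` standing in for the file content.
        #
        #     for line in changes_data.splitlines():
        #         action, hexnode, tagname = line.split(b' ', 2)
        #         if action == b'+A':
        #             ...  # react to a newly added tag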
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes, if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

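    # A hedged usage sketch (not part of the class): opening a transaction
    # requires holding the store lock, and calling ``transaction()`` again
    # while one is running returns a nested transaction.
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-changes') as tr:
    #             ...  # mutate the repository; an exception aborts the txn
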
2641 def _journalfiles(self):
2638 def _journalfiles(self):
2642 first = (
2639 first = (
2643 (self.svfs, b'journal'),
2640 (self.svfs, b'journal'),
2644 (self.svfs, b'journal.narrowspec'),
2641 (self.svfs, b'journal.narrowspec'),
2645 (self.vfs, b'journal.narrowspec.dirstate'),
2642 (self.vfs, b'journal.narrowspec.dirstate'),
2646 (self.vfs, b'journal.dirstate'),
2643 (self.vfs, b'journal.dirstate'),
2647 )
2644 )
2648 middle = []
2645 middle = []
2649 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2646 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2650 if dirstate_data is not None:
2647 if dirstate_data is not None:
2651 middle.append((self.vfs, dirstate_data))
2648 middle.append((self.vfs, dirstate_data))
2652 end = (
2649 end = (
2653 (self.vfs, b'journal.branch'),
2650 (self.vfs, b'journal.branch'),
2654 (self.vfs, b'journal.desc'),
2651 (self.vfs, b'journal.desc'),
2655 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2652 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2656 (self.svfs, b'journal.phaseroots'),
2653 (self.svfs, b'journal.phaseroots'),
2657 )
2654 )
2658 return first + tuple(middle) + end
2655 return first + tuple(middle) + end
2659
2656
2660 def undofiles(self):
2657 def undofiles(self):
2661 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2658 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2662
2659
2663 @unfilteredmethod
2660 @unfilteredmethod
2664 def _writejournal(self, desc):
2661 def _writejournal(self, desc):
2665 self.dirstate.savebackup(None, b'journal.dirstate')
2662 self.dirstate.savebackup(None, b'journal.dirstate')
2666 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2663 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2667 narrowspec.savebackup(self, b'journal.narrowspec')
2664 narrowspec.savebackup(self, b'journal.narrowspec')
2668 self.vfs.write(
2665 self.vfs.write(
2669 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2666 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2670 )
2667 )
2671 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2668 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2672 bookmarksvfs = bookmarks.bookmarksvfs(self)
2669 bookmarksvfs = bookmarks.bookmarksvfs(self)
2673 bookmarksvfs.write(
2670 bookmarksvfs.write(
2674 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2671 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2675 )
2672 )
2676 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2673 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2677
2674
2678 def recover(self):
2675 def recover(self):
2679 with self.lock():
2676 with self.lock():
2680 if self.svfs.exists(b"journal"):
2677 if self.svfs.exists(b"journal"):
2681 self.ui.status(_(b"rolling back interrupted transaction\n"))
2678 self.ui.status(_(b"rolling back interrupted transaction\n"))
2682 vfsmap = {
2679 vfsmap = {
2683 b'': self.svfs,
2680 b'': self.svfs,
2684 b'plain': self.vfs,
2681 b'plain': self.vfs,
2685 }
2682 }
2686 transaction.rollback(
2683 transaction.rollback(
2687 self.svfs,
2684 self.svfs,
2688 vfsmap,
2685 vfsmap,
2689 b"journal",
2686 b"journal",
2690 self.ui.warn,
2687 self.ui.warn,
2691 checkambigfiles=_cachedfiles,
2688 checkambigfiles=_cachedfiles,
2692 )
2689 )
2693 self.invalidate()
2690 self.invalidate()
2694 return True
2691 return True
2695 else:
2692 else:
2696 self.ui.warn(_(b"no interrupted transaction available\n"))
2693 self.ui.warn(_(b"no interrupted transaction available\n"))
2697 return False
2694 return False
2698
2695
2699 def rollback(self, dryrun=False, force=False):
2696 def rollback(self, dryrun=False, force=False):
2700 wlock = lock = dsguard = None
2697 wlock = lock = dsguard = None
2701 try:
2698 try:
2702 wlock = self.wlock()
2699 wlock = self.wlock()
2703 lock = self.lock()
2700 lock = self.lock()
2704 if self.svfs.exists(b"undo"):
2701 if self.svfs.exists(b"undo"):
2705 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2702 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2706
2703
2707 return self._rollback(dryrun, force, dsguard)
2704 return self._rollback(dryrun, force, dsguard)
2708 else:
2705 else:
2709 self.ui.warn(_(b"no rollback information available\n"))
2706 self.ui.warn(_(b"no rollback information available\n"))
2710 return 1
2707 return 1
2711 finally:
2708 finally:
2712 release(dsguard, lock, wlock)
2709 release(dsguard, lock, wlock)
2713
2710
2714 @unfilteredmethod # Until we get smarter cache management
2711 @unfilteredmethod # Until we get smarter cache management
2715 def _rollback(self, dryrun, force, dsguard):
2712 def _rollback(self, dryrun, force, dsguard):
2716 ui = self.ui
2713 ui = self.ui
2717 try:
2714 try:
2718 args = self.vfs.read(b'undo.desc').splitlines()
2715 args = self.vfs.read(b'undo.desc').splitlines()
2719 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2716 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2720 if len(args) >= 3:
2717 if len(args) >= 3:
2721 detail = args[2]
2718 detail = args[2]
2722 oldtip = oldlen - 1
2719 oldtip = oldlen - 1
2723
2720
2724 if detail and ui.verbose:
2721 if detail and ui.verbose:
2725 msg = _(
2722 msg = _(
2726 b'repository tip rolled back to revision %d'
2723 b'repository tip rolled back to revision %d'
2727 b' (undo %s: %s)\n'
2724 b' (undo %s: %s)\n'
2728 ) % (oldtip, desc, detail)
2725 ) % (oldtip, desc, detail)
2729 else:
2726 else:
2730 msg = _(
2727 msg = _(
2731 b'repository tip rolled back to revision %d (undo %s)\n'
2728 b'repository tip rolled back to revision %d (undo %s)\n'
2732 ) % (oldtip, desc)
2729 ) % (oldtip, desc)
2733 except IOError:
2730 except IOError:
2734 msg = _(b'rolling back unknown transaction\n')
2731 msg = _(b'rolling back unknown transaction\n')
2735 desc = None
2732 desc = None
2736
2733
2737 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2734 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2738 raise error.Abort(
2735 raise error.Abort(
2739 _(
2736 _(
2740 b'rollback of last commit while not checked out '
2737 b'rollback of last commit while not checked out '
2741 b'may lose data'
2738 b'may lose data'
2742 ),
2739 ),
2743 hint=_(b'use -f to force'),
2740 hint=_(b'use -f to force'),
2744 )
2741 )
2745
2742
2746 ui.status(msg)
2743 ui.status(msg)
2747 if dryrun:
2744 if dryrun:
2748 return 0
2745 return 0
2749
2746
2750 parents = self.dirstate.parents()
2747 parents = self.dirstate.parents()
2751 self.destroying()
2748 self.destroying()
2752 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2749 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2753 transaction.rollback(
2750 transaction.rollback(
2754 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2751 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2755 )
2752 )
2756 bookmarksvfs = bookmarks.bookmarksvfs(self)
2753 bookmarksvfs = bookmarks.bookmarksvfs(self)
2757 if bookmarksvfs.exists(b'undo.bookmarks'):
2754 if bookmarksvfs.exists(b'undo.bookmarks'):
2758 bookmarksvfs.rename(
2755 bookmarksvfs.rename(
2759 b'undo.bookmarks', b'bookmarks', checkambig=True
2756 b'undo.bookmarks', b'bookmarks', checkambig=True
2760 )
2757 )
2761 if self.svfs.exists(b'undo.phaseroots'):
2758 if self.svfs.exists(b'undo.phaseroots'):
2762 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2759 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2763 self.invalidate()
2760 self.invalidate()
2764
2761
2765 has_node = self.changelog.index.has_node
2762 has_node = self.changelog.index.has_node
2766 parentgone = any(not has_node(p) for p in parents)
2763 parentgone = any(not has_node(p) for p in parents)
2767 if parentgone:
2764 if parentgone:
2768 # prevent dirstateguard from overwriting already restored one
2765 # prevent dirstateguard from overwriting already restored one
2769 dsguard.close()
2766 dsguard.close()
2770
2767
2771 narrowspec.restorebackup(self, b'undo.narrowspec')
2768 narrowspec.restorebackup(self, b'undo.narrowspec')
2772 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2769 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2773 self.dirstate.restorebackup(None, b'undo.dirstate')
2770 self.dirstate.restorebackup(None, b'undo.dirstate')
2774 try:
2771 try:
2775 branch = self.vfs.read(b'undo.branch')
2772 branch = self.vfs.read(b'undo.branch')
2776 self.dirstate.setbranch(encoding.tolocal(branch))
2773 self.dirstate.setbranch(encoding.tolocal(branch))
2777 except IOError:
2774 except IOError:
2778 ui.warn(
2775 ui.warn(
2779 _(
2776 _(
2780 b'named branch could not be reset: '
2777 b'named branch could not be reset: '
2781 b'current branch is still \'%s\'\n'
2778 b'current branch is still \'%s\'\n'
2782 )
2779 )
2783 % self.dirstate.branch()
2780 % self.dirstate.branch()
2784 )
2781 )
2785
2782
2786 parents = tuple([p.rev() for p in self[None].parents()])
2783 parents = tuple([p.rev() for p in self[None].parents()])
2787 if len(parents) > 1:
2784 if len(parents) > 1:
2788 ui.status(
2785 ui.status(
2789 _(
2786 _(
2790 b'working directory now based on '
2787 b'working directory now based on '
2791 b'revisions %d and %d\n'
2788 b'revisions %d and %d\n'
2792 )
2789 )
2793 % parents
2790 % parents
2794 )
2791 )
2795 else:
2792 else:
2796 ui.status(
2793 ui.status(
2797 _(b'working directory now based on revision %d\n') % parents
2794 _(b'working directory now based on revision %d\n') % parents
2798 )
2795 )
2799 mergestatemod.mergestate.clean(self)
2796 mergestatemod.mergestate.clean(self)
2800
2797
2801 # TODO: if we know which new heads may result from this rollback, pass
2798 # TODO: if we know which new heads may result from this rollback, pass
2802 # them to destroy(), which will prevent the branchhead cache from being
2799 # them to destroy(), which will prevent the branchhead cache from being
2803 # invalidated.
2800 # invalidated.
2804 self.destroyed()
2801 self.destroyed()
2805 return 0
2802 return 0
2806
2803
2807 def _buildcacheupdater(self, newtransaction):
2804 def _buildcacheupdater(self, newtransaction):
2808 """called during transaction to build the callback updating cache
2805 """called during transaction to build the callback updating cache
2809
2806
2810 Lives on the repository to help extension who might want to augment
2807 Lives on the repository to help extension who might want to augment
2811 this logic. For this purpose, the created transaction is passed to the
2808 this logic. For this purpose, the created transaction is passed to the
2812 method.
2809 method.
2813 """
2810 """
2814 # we must avoid cyclic reference between repo and transaction.
2811 # we must avoid cyclic reference between repo and transaction.
2815 reporef = weakref.ref(self)
2812 reporef = weakref.ref(self)
2816
2813
2817 def updater(tr):
2814 def updater(tr):
2818 repo = reporef()
2815 repo = reporef()
2819 assert repo is not None # help pytype
2816 assert repo is not None # help pytype
2820 repo.updatecaches(tr)
2817 repo.updatecaches(tr)
2821
2818
2822 return updater
2819 return updater
2823
2820
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others.
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed writes.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing the fnode cache warms it
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

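    # Illustrative sketch (not part of upstream): an extension that wants
    # every cache warmed after its own write could call, with `repo` a
    # localrepository and `repository` the interfaces module used above:
    #
    #     repo.updatecaches(caches=repository.CACHES_ALL)
    #
    # Passing the transaction instead lets the method warm selectively:
    #
    #     repo.updatecaches(tr=tr)
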
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

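    # Usage sketch (assumed caller code, not upstream): a long-running
    # process that suspects on-disk changes from another writer could drop
    # every in-memory view before continuing:
    #
    #     repo.invalidateall()  # store, caches and dirstate are reread lazily
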
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

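    # Hypothetical example: an action that must wait until every lock is
    # released can be queued via _afterlock(); the callback receives one
    # boolean indicating whether the locked section succeeded:
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)
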
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

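    # Lock-ordering sketch (assumed caller code): per the docstrings above,
    # always take 'wlock' before 'lock', ideally as context managers so both
    # are released in reverse order even on error:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working copy and the store safely
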
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

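    # Minimal commit sketch (illustrative only, using the parameters
    # documented above; returns the new node, or None if nothing changed):
    #
    #     node = repo.commit(
    #         text=b'example: update a file',
    #         user=b'Jane Doe <jane@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
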
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
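        # For each (top, bottom) pair, walk first parents from top towards
        # bottom, sampling the nodes seen at exponentially growing distances
        # (1, 2, 4, 8, ...). The answer therefore stays small even on long
        # chains, which is what the legacy wire protocol expects here.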
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, and
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

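    # Hedged example of the pushkey flow: bookmarks are one concrete pushkey
    # namespace, with bookmark names as keys and hex nodes as values, so a
    # bookmark move can be pushed through the generic API roughly as:
    #
    #     ok = repo.pushkey(b'bookmarks', b'my-book', old_hex, new_hex)
    #
    # where `old_hex`/`new_hex` are placeholders for hex changeset ids
    # (`old_hex` being b'' when the bookmark does not exist yet).
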
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


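# Registration sketch (assumption, not shipped code): an extension adding a
# sidedata computer for changelog entries might do, e.g. from reposetup():
#
#     def computer(repo, revlog, rev, sidedata):
#         # return (new sidedata dict, (flags to add, flags to remove))
#         return {}, (0, 0)
#
#     repo.register_wanted_sidedata(b'exp-my-category')
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG,
#         b'exp-my-category',
#         (b'exp-my-category',),
#         computer,
#         0,
#     )

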
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


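# Behaviour sketch for undoname() (illustrative values):
#
#     undoname(b'.hg/store/journal')            -> b'.hg/store/undo'
#     undoname(b'.hg/store/journal.phaseroots') -> b'.hg/store/undo.phaseroots'

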
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


3611 def newreporequirements(ui, createopts):
3608 def newreporequirements(ui, createopts):
3612 """Determine the set of requirements for a new local repository.
3609 """Determine the set of requirements for a new local repository.
3613
3610
3614 Extensions can wrap this function to specify custom requirements for
3611 Extensions can wrap this function to specify custom requirements for
3615 new repositories.
3612 new repositories.
3616 """
3613 """
3617
3614
3618 if b'backend' not in createopts:
3615 if b'backend' not in createopts:
3619 raise error.ProgrammingError(
3616 raise error.ProgrammingError(
3620 b'backend key not present in createopts; '
3617 b'backend key not present in createopts; '
3621 b'was defaultcreateopts() called?'
3618 b'was defaultcreateopts() called?'
3622 )
3619 )
3623
3620
3624 if createopts[b'backend'] != b'revlogv1':
3621 if createopts[b'backend'] != b'revlogv1':
3625 raise error.Abort(
3622 raise error.Abort(
3626 _(
3623 _(
3627 b'unable to determine repository requirements for '
3624 b'unable to determine repository requirements for '
3628 b'storage backend: %s'
3625 b'storage backend: %s'
3629 )
3626 )
3630 % createopts[b'backend']
3627 % createopts[b'backend']
3631 )
3628 )
3632
3629
3633 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3630 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3634 if ui.configbool(b'format', b'usestore'):
3631 if ui.configbool(b'format', b'usestore'):
3635 requirements.add(requirementsmod.STORE_REQUIREMENT)
3632 requirements.add(requirementsmod.STORE_REQUIREMENT)
3636 if ui.configbool(b'format', b'usefncache'):
3633 if ui.configbool(b'format', b'usefncache'):
3637 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3634 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3638 if ui.configbool(b'format', b'dotencode'):
3635 if ui.configbool(b'format', b'dotencode'):
3639 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3636 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3640
3637
3641 compengines = ui.configlist(b'format', b'revlog-compression')
3638 compengines = ui.configlist(b'format', b'revlog-compression')
3642 for compengine in compengines:
3639 for compengine in compengines:
3643 if compengine in util.compengines:
3640 if compengine in util.compengines:
3644 engine = util.compengines[compengine]
3641 engine = util.compengines[compengine]
3645 if engine.available() and engine.revlogheader():
3642 if engine.available() and engine.revlogheader():
3646 break
3643 break
3647 else:
3644 else:
3648 raise error.Abort(
3645 raise error.Abort(
3649 _(
3646 _(
3650 b'compression engines %s defined by '
3647 b'compression engines %s defined by '
3651 b'format.revlog-compression not available'
3648 b'format.revlog-compression not available'
3652 )
3649 )
3653 % b', '.join(b'"%s"' % e for e in compengines),
3650 % b', '.join(b'"%s"' % e for e in compengines),
3654 hint=_(
3651 hint=_(
3655 b'run "hg debuginstall" to list available '
3652 b'run "hg debuginstall" to list available '
3656 b'compression engines'
3653 b'compression engines'
3657 ),
3654 ),
3658 )
3655 )
3659
3656
3660 # zlib is the historical default and doesn't need an explicit requirement.
3657 # zlib is the historical default and doesn't need an explicit requirement.
3661 if compengine == b'zstd':
3658 if compengine == b'zstd':
3662 requirements.add(b'revlog-compression-zstd')
3659 requirements.add(b'revlog-compression-zstd')
3663 elif compengine != b'zlib':
3660 elif compengine != b'zlib':
3664 requirements.add(b'exp-compression-%s' % compengine)
3661 requirements.add(b'exp-compression-%s' % compengine)
3665
3662
3666 if scmutil.gdinitconfig(ui):
3663 if scmutil.gdinitconfig(ui):
3667 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3664 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3668 if ui.configbool(b'format', b'sparse-revlog'):
3665 if ui.configbool(b'format', b'sparse-revlog'):
3669 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3666 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3670
3667
3671 # experimental config: format.use-dirstate-v2
3668 # experimental config: format.use-dirstate-v2
3672 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3669 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
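

# For instance (illustrative): ``hg init --config format.use-dirstate-v2=yes
# repo`` makes the first branch above add the dirstate-v2 requirement to the
# new repo (the exact requirement strings are defined in ``requirements.py``).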


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements that need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
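

# For example (illustrative invocation): ``hg init repo --config
# format.usestore=no --config format.use-share-safe=yes`` would emit the
# warning above and drop the share-safe requirement from the new repository.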


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
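

# A sketch of how an extension might wrap this to claim a creation option it
# implements (``myext-opt`` and ``_filtered`` are hypothetical names):
#
#     def _filtered(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myext-opt', None)  # we know how to handle this one
#         return unknown
#
#     extensions.wrapfunction(hg, 'filterknowncreateopts', _filtered)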


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store's requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3957 """Poison a repository instance so it can no longer be used."""
3954 """Poison a repository instance so it can no longer be used."""
3958 # Perform any cleanup on the instance.
3955 # Perform any cleanup on the instance.
3959 repo.close()
3956 repo.close()
3960
3957
3961 # Our strategy is to replace the type of the object with one that
3958 # Our strategy is to replace the type of the object with one that
3962 # has all attribute lookups result in error.
3959 # has all attribute lookups result in error.
3963 #
3960 #
3964 # But we have to allow the close() method because some constructors
3961 # But we have to allow the close() method because some constructors
3965 # of repos call close() on repo references.
3962 # of repos call close() on repo references.
3966 class poisonedrepository:
3963 class poisonedrepository:
3967 def __getattribute__(self, item):
3964 def __getattribute__(self, item):
3968 if item == 'close':
3965 if item == 'close':
3969 return object.__getattribute__(self, item)
3966 return object.__getattribute__(self, item)
3970
3967
3971 raise error.ProgrammingError(
3968 raise error.ProgrammingError(
3972 b'repo instances should not be used after unshare'
3969 b'repo instances should not be used after unshare'
3973 )
3970 )
3974
3971
3975 def close(self):
3972 def close(self):
3976 pass
3973 pass
3977
3974
3978 # We may have a repoview, which intercepts __setattr__. So be sure
3975 # We may have a repoview, which intercepts __setattr__. So be sure
3979 # we operate at the lowest level possible.
3976 # we operate at the lowest level possible.
3980 object.__setattr__(repo, '__class__', poisonedrepository)
3977 object.__setattr__(repo, '__class__', poisonedrepository)
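

# Illustrative effect of the poisoning above (a sketch, not part of the
# module):
#
#     repo = hg.repository(myui, b'/tmp/newrepo')
#     hg.poisonrepository(repo)
#     repo.root        # raises ProgrammingError
#     repo.close()     # still allowed, and intentionally a no-op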

@@ -1,678 +1,675 @@
# sshpeer.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import re
import uuid

from .i18n import _
from .pycompat import getattr
from . import (
    error,
    pycompat,
    util,
    wireprototypes,
    wireprotov1peer,
    wireprotov1server,
)
from .utils import (
    procutil,
    stringutil,
    urlutil,
)


def _serverquote(s):
    """quote a string for the remote shell ... which we assume is sh"""
    if not s:
        return s
    if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
        return s
    return b"'%s'" % s.replace(b"'", b"'\\''")
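

# For example (illustrative):
#
#     _serverquote(b'repo')     == b'repo'          # matches the safe set
#     _serverquote(b'my repo')  == b"'my repo'"
#     _serverquote(b"it's")     == b"'it'\\''s'"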


def _forwardoutput(ui, pipe, warn=False):
    """display all data currently available on pipe as remote output.

    This is non-blocking."""
    if pipe and not pipe.closed:
        s = procutil.readpipe(pipe)
        if s:
            display = ui.warn if warn else ui.status
            for l in s.splitlines():
                display(_(b"remote: "), l, b'\n')


class doublepipe:
    """Operate a side-channel pipe in addition to a main one

    The side-channel pipe contains server output to be forwarded to the user.
    The double pipe will behave as the "main" pipe, but will ensure the
    content of the "side" pipe is properly processed while we wait for a
    blocking call on the "main" pipe.

    If large amounts of data are read from "main", the forward will cease
    after the first bytes start to appear. This simplifies the implementation
    without affecting actual output of sshpeer too much, as we rarely issue
    large reads for data not yet emitted by the server.

    The main pipe is expected to be a 'bufferedinputpipe' from the util module
    that handles all the OS-specific bits. This class lives in this module
    because it focuses on behavior specific to the ssh protocol."""

    def __init__(self, ui, main, side):
        self._ui = ui
        self._main = main
        self._side = side

    def _wait(self):
        """wait until some data are available on main or side

        return a pair of booleans (ismainready, issideready)

        (This will only wait for data if the setup is supported by `util.poll`)
        """
        if (
            isinstance(self._main, util.bufferedinputpipe)
            and self._main.hasbuffer
        ):
            # Main has data. Assume side is worth poking at.
            return True, True

        fds = [self._main.fileno(), self._side.fileno()]
        try:
            act = util.poll(fds)
        except NotImplementedError:
            # not-yet-supported case, assume all have data.
            act = fds
        return (self._main.fileno() in act, self._side.fileno() in act)

    def write(self, data):
        return self._call(b'write', data)

    def read(self, size):
        r = self._call(b'read', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def unbufferedread(self, size):
        r = self._call(b'unbufferedread', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def readline(self):
        return self._call(b'readline')

    def _call(self, methname, data=None):
        """call <methname> on "main", forward output of "side" while blocking"""
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return b''
        while True:
            mainready, sideready = self._wait()
            if sideready:
                _forwardoutput(self._ui, self._side)
            if mainready:
                meth = getattr(self._main, methname)
                if data is None:
                    return meth()
                else:
                    return meth(data)

    def close(self):
        return self._main.close()

    @property
    def closed(self):
        return self._main.closed

    def flush(self):
        return self._main.flush()
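

# Example wiring, as done in ``sshv1peer.__init__`` below: the server's
# stdout becomes the "main" pipe and its stderr the "side" pipe, so remote
# diagnostics are forwarded whenever we block on protocol data:
#
#     stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
#     stdin = doublepipe(ui, stdin, stderr)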


def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
    """Clean up pipes used by an SSH connection."""
    didsomething = False
    if pipeo and not pipeo.closed:
        didsomething = True
        pipeo.close()
    if pipei and not pipei.closed:
        didsomething = True
        pipei.close()

    if pipee and not pipee.closed:
        didsomething = True
        # Try to read from the err descriptor until EOF.
        try:
            for l in pipee:
                ui.status(_(b'remote: '), l)
        except (IOError, ValueError):
            pass

        pipee.close()

    if didsomething and warn is not None:
        # Encourage explicit close of sshpeers. Closing via __del__ is
        # not very predictable when exceptions are thrown, which has led
        # to deadlocks due to a peer getting gc'ed in a fork.
        # We add our own stack trace, because the stacktrace when called
        # from __del__ is useless.
        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)


def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
    """Create an SSH connection to a server.

    Returns a tuple of (process, stdin, stdout, stderr) for the
    spawned process.
    """
    cmd = b'%s %s %s' % (
        sshcmd,
        args,
        procutil.shellquote(
            b'%s -R %s serve --stdio'
            % (_serverquote(remotecmd), _serverquote(path))
        ),
    )

    ui.debug(b'running %s\n' % cmd)

    # no buffering allows the use of 'select'
    # feel free to remove buffering and select usage when we ultimately
    # move to threading.
    stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)

    return proc, stdin, stdout, stderr


def _clientcapabilities():
    """Return list of capabilities of this client.

    Returns a list of capabilities that are supported by this client.
    """
    protoparams = {b'partial-pull'}
    comps = [
        e.wireprotosupport().name
        for e in util.compengines.supportedwireengines(util.CLIENTROLE)
    ]
    protoparams.add(b'comp=%s' % b','.join(comps))
    return protoparams
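

# A typical result (illustrative; the compression list depends on which
# engines are available at runtime):
#
#     {b'partial-pull', b'comp=zstd,zlib,none,bzip2'}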


def _performhandshake(ui, stdin, stdout, stderr):
    def badresponse():
        # Flush any output on stderr. In general, the stderr contains errors
        # from the remote (ssh errors, some hg errors), and status indications
        # (like "adding changes"), with no current way to tell them apart.
        # Here we failed so early that it's almost certainly only errors, so
        # use warn=True so -q doesn't hide them.
        _forwardoutput(ui, stderr, warn=True)

        msg = _(b'no suitable response from remote hg')
        hint = ui.config(b'ui', b'ssherrorhint')
        raise error.RepoError(msg, hint=hint)

    # The handshake consists of sending wire protocol commands in reverse
    # order of protocol implementation and then sniffing for a response
    # to one of them.
    #
    # Those commands (from oldest to newest) are:
    #
    # ``between``
    #   Asks for the set of revisions between a pair of revisions. Command
    #   present in all Mercurial server implementations.
    #
    # ``hello``
    #   Instructs the server to advertise its capabilities. Introduced in
    #   Mercurial 0.9.1.
    #
    # ``upgrade``
    #   Requests upgrade from default transport protocol version 1 to
    #   a newer version. Introduced in Mercurial 4.6 as an experimental
    #   feature.
    #
    # The ``between`` command is issued with a request for the null
    # range. If the remote is a Mercurial server, this request will
    # generate a specific response: ``1\n\n``. This represents the
    # wire protocol encoded value for ``\n``. We look for ``1\n\n``
    # in the output stream and know this is the response to ``between``
    # and we're at the end of our handshake reply.
    #
    # The response to the ``hello`` command will be a line with the
    # length of the value returned by that command followed by that
    # value. If the server doesn't support ``hello`` (which should be
    # rare), that line will be ``0\n``. Otherwise, the value will contain
    # RFC 822 like lines. Of these, the ``capabilities:`` line contains
    # the capabilities of the server.
    #
    # The ``upgrade`` command isn't really a command in the traditional
    # sense of version 1 of the transport because it isn't using the
    # proper mechanism for formatting; instead, it just encodes
    # arguments on the line, delimited by spaces.
    #
    # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
    # If the server doesn't support protocol upgrades, it will reply to
    # this line with ``0\n``. Otherwise, it emits an
    # ``upgraded <token> <protocol>`` line to both stdout and stderr.
    # Content immediately following this line describes additional
    # protocol and server state.
    #
    # In addition to the responses to our command requests, the server
    # may emit "banner" output on stdout. SSH servers are allowed to
    # print messages to stdout on login. Issuing commands on connection
    # allows us to flush this banner output from the server by scanning
    # for output to our well-known ``between`` command. Of course, if
    # the banner contains ``1\n\n``, this will throw off our detection.
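    #
    # As an illustrative transcript (lengths and capability names vary by
    # server), a successful version 1 handshake looks roughly like:
    #
    #   client -> server:
    #       hello\n
    #       between\n
    #       pairs 81\n
    #       <40 zeros>-<40 zeros>
    #
    #   server -> client:
    #       384\n                        (length of the ``hello`` payload)
    #       capabilities: lookup branchmap pushkey ... unbundle=HG10GZ,...\n
    #       1\n
    #       \n                           (the ``1\n\n`` reply to ``between``)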

    requestlog = ui.configbool(b'devel', b'debug.peer-request')

    # Generate a random token to help identify responses to version 2
    # upgrade request.
    token = pycompat.sysbytes(str(uuid.uuid4()))

    try:
        pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
        handshake = [
            b'hello\n',
            b'between\n',
            b'pairs %d\n' % len(pairsarg),
            pairsarg,
        ]

        if requestlog:
            ui.debug(b'devel-peer-request: hello+between\n')
            ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
        ui.debug(b'sending hello command\n')
        ui.debug(b'sending between command\n')

        stdin.write(b''.join(handshake))
        stdin.flush()
    except IOError:
        badresponse()

    # Assume version 1 of wire protocol by default.
    protoname = wireprototypes.SSHV1
    reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))

    lines = [b'', b'dummy']
    max_noise = 500
    while lines[-1] and max_noise:
        try:
            l = stdout.readline()
            _forwardoutput(ui, stderr, warn=True)

            # Look for reply to protocol upgrade request. It has a token
            # in it, so there should be no false positives.
            m = reupgraded.match(l)
            if m:
                protoname = m.group(1)
                ui.debug(b'protocol upgraded to %s\n' % protoname)
                # If an upgrade was handled, the ``hello`` and ``between``
                # requests are ignored. The next output belongs to the
                # protocol, so stop scanning lines.
                break

            # Otherwise it could be a banner, or a ``0\n`` response if the
            # server doesn't support upgrade.

            if lines[-1] == b'1\n' and l == b'\n':
                break
            if l:
                ui.debug(b'remote: ', l)
            lines.append(l)
            max_noise -= 1
        except IOError:
            badresponse()
    else:
        badresponse()

    caps = set()

    # For version 1, we should see a ``capabilities`` line in response to the
    # ``hello`` command.
    if protoname == wireprototypes.SSHV1:
        for l in reversed(lines):
            # Look for response to ``hello`` command. Scan from the back so
            # we don't misinterpret banner output as the command reply.
            if l.startswith(b'capabilities:'):
                caps.update(l[:-1].split(b':')[1].split())
                break

    # Error if we couldn't find capabilities, this means:
    #
    # 1. Remote isn't a Mercurial server
    # 2. Remote is a <0.9.1 Mercurial server
    # 3. Remote is a future Mercurial server that dropped ``hello``
    #    and other attempted handshake mechanisms.
    if not caps:
        badresponse()

    # Flush any output on stderr before proceeding.
    _forwardoutput(ui, stderr, warn=True)

    return protoname, caps


class sshv1peer(wireprotov1peer.wirepeer):
    def __init__(
        self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
    ):
        """Create a peer from an existing SSH connection.

        ``proc`` is a handle on the underlying SSH process.
        ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
        pipes for that process.
        ``caps`` is a set of capabilities supported by the remote.
        ``autoreadstderr`` denotes whether to automatically read from
        stderr and to forward its output.
        """
        self._url = url
        self.ui = ui
        # self._subprocess is unused. Keeping a handle on the process
        # holds a reference and prevents it from being garbage collected.
        self._subprocess = proc

        # And we hook up our "doublepipe" wrapper to allow querying
        # stderr any time we perform I/O.
        if autoreadstderr:
            stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
            stdin = doublepipe(ui, stdin, stderr)

        self._pipeo = stdin
        self._pipei = stdout
        self._pipee = stderr
        self._caps = caps
        self._autoreadstderr = autoreadstderr
        self._initstack = b''.join(util.getstackframes(1))

    # Commands that have a "framed" response where the first line of the
    # response contains the length of that response.
    _FRAMED_COMMANDS = {
        b'batch',
    }

    # Begin of ipeerconnection interface.

    def url(self):
        return self._url

    def local(self):
        return None

-    def peer(self):
-        return self
-
    def canpush(self):
        return True

    def close(self):
        self._cleanup()

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _readerr(self):
        _forwardoutput(self.ui, self._pipee)

    def _abort(self, exception):
        self._cleanup()
        raise exception

    def _cleanup(self, warn=None):
        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)

    def __del__(self):
        self._cleanup(warn=self._initstack)

    def _sendrequest(self, cmd, args, framed=False):
        if self.ui.debugflag and self.ui.configbool(
            b'devel', b'debug.peer-request'
        ):
            dbg = self.ui.debug
            line = b'devel-peer-request: %s\n'
            dbg(line % cmd)
            for key, value in sorted(args.items()):
                if not isinstance(value, dict):
                    dbg(line % b' %s: %d bytes' % (key, len(value)))
                else:
                    for dk, dv in sorted(value.items()):
                        dbg(line % b' %s-%s: %d' % (key, dk, len(dv)))
        self.ui.debug(b"sending %s command\n" % cmd)
        self._pipeo.write(b"%s\n" % cmd)
        _func, names = wireprotov1server.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == b'*':
                wireargs[b'*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        for k, v in sorted(wireargs.items()):
            self._pipeo.write(b"%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                for dk, dv in v.items():
                    self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
                    self._pipeo.write(dv)
            else:
                self._pipeo.write(v)
        self._pipeo.flush()

        # We know exactly how many bytes are in the response. So return a proxy
        # around the raw output stream that allows reading exactly this many
        # bytes. Callers then can read() without fear of overrunning the
        # response.
        if framed:
            amount = self._getamount()
            return util.cappedreader(self._pipei, amount)

        return self._pipei

    def _callstream(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _callcompressable(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _call(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=True).read()

    def _callpush(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            return b'', r

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        # In case of success, there is an empty frame and a frame containing
        # the integer result (as a string).
        # In case of error, there is a non-empty frame containing the error.
        r = self._readframed()
        if r:
            return b'', r
        return self._readframed(), b''

    def _calltwowaystream(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            # XXX needs to be made better
            raise error.Abort(_(b'unexpected remote reply: %s') % r)

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        return self._pipei

    def _getamount(self):
        l = self._pipei.readline()
        if l == b'\n':
            if self._autoreadstderr:
                self._readerr()
            msg = _(b'check previous remote output')
            self._abort(error.OutOfBandError(hint=msg))
        if self._autoreadstderr:
            self._readerr()
        try:
            return int(l)
        except ValueError:
            self._abort(error.ResponseError(_(b"unexpected response:"), l))

    def _readframed(self):
        size = self._getamount()
        if not size:
            return b''

        return self._pipei.read(size)

    def _writeframed(self, data, flush=False):
        self._pipeo.write(b"%d\n" % len(data))
        if data:
            self._pipeo.write(data)
        if flush:
            self._pipeo.flush()
        if self._autoreadstderr:
            self._readerr()
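
    # Frame layout used by the helpers above (illustrative): each frame is
    # the payload length in decimal ASCII, a newline, then the payload; an
    # empty frame is just b"0\n". Sending b"abcd" followed by end-of-stream
    # therefore writes:
    #
    #     b"4\n" + b"abcd" + b"0\n"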
575 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
572 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
576 """Make a peer instance from existing pipes.
573 """Make a peer instance from existing pipes.
577
574
578 ``path`` and ``proc`` are stored on the eventual peer instance and may
575 ``path`` and ``proc`` are stored on the eventual peer instance and may
579 not be used for anything meaningful.
576 not be used for anything meaningful.
580
577
581 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
578 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
582 SSH server's stdio handles.
579 SSH server's stdio handles.
583
580
584 This function is factored out to allow creating peers that don't
581 This function is factored out to allow creating peers that don't
585 actually spawn a new process. It is useful for starting SSH protocol
582 actually spawn a new process. It is useful for starting SSH protocol
586 servers and clients via non-standard means, which can be useful for
583 servers and clients via non-standard means, which can be useful for
587 testing.
584 testing.
588 """
585 """
    try:
        protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
    except Exception:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise

    if protoname == wireprototypes.SSHV1:
        return sshv1peer(
            ui,
            path,
            proc,
            stdin,
            stdout,
            stderr,
            caps,
            autoreadstderr=autoreadstderr,
        )
    else:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise error.RepoError(
            _(b'unknown version of SSH protocol: %s') % protoname
        )

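As the docstring notes, makepeer() exists mainly so tests can wrap pipes they
created themselves. A sketch of that pattern, assuming a local repository in
./repo and that running `hg -R repo serve --stdio` by hand stands in for what
_makeconnection() would normally spawn (the variable names are illustrative,
and the exact path argument plumbing may differ between Mercurial versions):

import subprocess
from mercurial import ui as uimod, sshpeer

myui = uimod.ui.load()
# spawn an SSH protocol server over plain pipes, no ssh involved
proc = subprocess.Popen(
    ['hg', '-R', 'repo', 'serve', '--stdio'],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    bufsize=0,  # unbuffered, so framed reads/writes are not delayed
)
peer = sshpeer.makepeer(
    myui, b'ssh://fake/', proc, proc.stdin, proc.stdout, proc.stderr
)
print(peer.capabilities())
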
def instance(ui, path, create, intents=None, createopts=None):
    """Create an SSH peer.

    The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
    """
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if u.scheme != b'ssh' or not u.host or u.path is None:
        raise error.RepoError(_(b"couldn't parse location %s") % path)

    urlutil.checksafessh(path)

    if u.passwd is not None:
        raise error.RepoError(_(b'password in URL not supported'))

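    # Illustrative example (not part of the code above): for
    # path = b'ssh://user@example.com:2222/repo', the parse yields
    # u.scheme == b'ssh', u.user == b'user', u.host == b'example.com',
    # u.port == b'2222', and u.path == b'repo'.
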
    sshcmd = ui.config(b'ui', b'ssh')
    remotecmd = ui.config(b'ui', b'remotecmd')
    sshaddenv = dict(ui.configitems(b'sshenv'))
    sshenv = procutil.shellenviron(sshaddenv)
    remotepath = u.path or b'.'

    args = procutil.sshargs(sshcmd, u.host, u.user, u.port)

    if create:
        # We /could/ pass the createopts to the remote "init" command, but
        # only if the remote command knows how to handle them. We don't yet
        # make any assumptions about that. And without querying the remote,
        # there's no way of knowing whether the remote even supports the
        # requested features.
        if createopts:
            raise error.RepoError(
                _(
                    b'cannot create remote SSH repositories '
                    b'with extra options'
                )
            )

        cmd = b'%s %s %s' % (
            sshcmd,
            args,
            procutil.shellquote(
                b'%s init %s'
                % (_serverquote(remotecmd), _serverquote(remotepath))
            ),
        )
        ui.debug(b'running %s\n' % cmd)
        res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv)
        if res != 0:
            raise error.RepoError(_(b'could not create remote repo'))

    proc, stdin, stdout, stderr = _makeconnection(
        ui, sshcmd, args, remotecmd, remotepath, sshenv
    )

    peer = makepeer(ui, path, proc, stdin, stdout, stderr)

    # Finally, if supported by the server, notify it about our own
    # capabilities.
    if b'protocaps' in peer.capabilities():
        try:
            peer._call(
                b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
            )
        except IOError:
            peer._cleanup()
            raise error.RepoError(_(b'capability exchange failed'))

    return peer
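
instance() consults three pieces of user configuration when building the ssh
command line: ui.ssh (the ssh client command), ui.remotecmd (the hg executable
to invoke on the remote side), and the [sshenv] section (extra environment
variables for the ssh process). A sketch of the corresponding hgrc settings;
the values shown are illustrative:

[ui]
ssh = ssh -C
remotecmd = hg

[sshenv]
LC_ALL = C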