branchcache: unconditionally write delayed branchmap
marmoute
r52350:c0d51565 default
@@ -1,4034 +1,4035 b''
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import re
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    policy,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)
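

# Illustrative sketch (not part of this module): these filecache classes
# are used as decorators on repository properties, so the decorated method
# is recomputed only when the named file changes on disk. The class and
# property below are hypothetical stand-ins for the real repository type.
class _examplerepo:
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # re-read .hg/bookmarks only when its stat data changes
        return bookmarks.bmstore(self)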


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
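

# Illustrative sketch (hypothetical helper): probing the filecache without
# forcing a load. 'repo' is assumed to be an already-open localrepository,
# and the filecache key is assumed to be the property name.
def _example_probe_changelog(repo):
    cl, cached = isfilecached(repo, 'changelog')
    if cached:
        return cl  # reuse the already-loaded changelog
    return None  # caller decides whether loading is worth the cost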


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
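

# Illustrative sketch (hypothetical mixin for a repository class): a method
# decorated with unfilteredmethod always receives the unfiltered repo as
# 'self', even when invoked through a filtered view.
class _exampleunfilteredmixin:
    @unfilteredmethod
    def _totalrevs(self):
        # counts hidden revisions too, since 'self' is unfiltered here
        return len(self.changelog)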


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
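

# Illustrative sketch (hypothetical helper): driving a peer through the
# executor interface; each callcommand() returns an already-resolved future.
def _example_heads(peer):
    with localcommandexecutor(peer) as executor:
        f = executor.callcommand(b'heads', {})
        executor.sendcommands()
    return f.result()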


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
        super(localpeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs,
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs,
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if hasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
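

# Illustrative sketch (hypothetical helper): a localpeer wraps a repository
# behind the peer API. Note that closing the peer closes the underlying repo.
def _example_local_caps(repo):
    peer = localpeer(repo)
    try:
        return peer.capabilities()
    finally:
        peer.close()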


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
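

# Illustrative sketch (hypothetical extension code): registering a feature
# setup function so a custom requirement is accepted when opening repos.
# The requirement name below is made up for this example.
def _example_featuresetup(ui, supported):
    # declare that this (made-up) requirement is understood
    supported.add(b'exp-example-requirement')


# An extension would register it at module load time:
# featuresetupfuncs.add(_example_featuresetup)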


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs
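

# Illustrative sketch (hypothetical helper): resolving the source vfs only
# when the requirements say this repository is a share.
def _example_source_vfs(hgvfs, requirements):
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    return _getsharedvfs(hgvfs, requirements) if shared else None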


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())
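

# Illustrative sketch (hypothetical helper): checking a repository's
# requirements before deciding how to open it. 'hgvfs' is assumed to be
# a vfs rooted at the repository's .hg/ directory.
def _example_is_share_safe(hgvfs):
    reqs = _readrequires(hgvfs, True)
    return requirementsmod.SHARESAFE_REQUIREMENT in reqs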


def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if the store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
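

# Illustrative sketch (hypothetical helper): the usual way to obtain a repo
# object ultimately funnels through makelocalrepository(). 'baseui' is
# assumed to be a mercurial.ui.ui instance and 'path' a bytes path.
def _example_open(baseui, path):
    repo = makelocalrepository(baseui, path)
    return repo.requirements  # the validated requirement set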


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
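

# Illustrative sketch (hypothetical extension code): wrapping loadhgrc, as
# its docstring suggests, to pull config from an extra file. The file name
# 'hgrc-extra' is made up for this example.
def _example_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret


# An extension would install the wrapper like this:
# extensions.wrapfunction(localrepo, 'loadhgrc', _example_loadhgrc)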


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
955
955
956
956
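# Sketch of the extension side of ``featuresetupfuncs`` (an assumed but
# conventional pattern; the requirement name is hypothetical): an extension
# registers a callback so its requirement is recognized whenever the
# extension is loaded for this ui:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)

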
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns ``None`` if every requirement is recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


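# Failure mode, illustrated (abridged output, hypothetical requirement
# name): opening a repo whose ``.hg/requires`` lists an entry this
# Mercurial doesn't know aborts roughly like
#
#     abort: repository requires features unknown to this Mercurial: exp-foo
#     (see https://mercurial-scm.org/wiki/MissingRequirement for more information)

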
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


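# A minimal sketch of the monkeypatching mentioned in the docstring above
# (assumed extension code; the requirement and config names are made up):
#
#     from mercurial import error, extensions, localrepo
#
#     def _compatible(orig, ui, requirements):
#         orig(ui, requirements)
#         if b'exp-myfeature' in requirements and not ui.configbool(
#             b'myext', b'enable-myfeature'
#         ):
#             raise error.RepoError(b'myext: exp-myfeature is not enabled')
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'ensurerequirementscompatible', _compatible
#         )

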
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


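# For reference, the selection above reduces to this decision table
# (derived from the branches, not an addition to the logic):
#
#     store + fncache requirements -> fncachestore (dotencode if required)
#     store requirement only       -> encodedstore
#     neither (very old repos)     -> basicstore

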
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    feature_config = options[b'feature-config'] = revlog.FeatureConfig()
    data_config = options[b'data-config'] = revlog.DataConfig()
    delta_config = options[b'delta-config'] = revlog.DeltaConfig()

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        data_config.chunk_cache_size = chunkcachesize

    memory_profile = scmutil.get_resource_profile(ui, b'memory')
    if memory_profile >= scmutil.RESOURCE_MEDIUM:
        data_config.uncompressed_cache_count = 10_000
        data_config.uncompressed_cache_factor = 4
        if memory_profile >= scmutil.RESOURCE_HIGH:
            data_config.uncompressed_cache_factor = 10

    delta_config.delta_both_parents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    delta_config.candidate_group_chunk_size = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    delta_config.lazy_delta = lazydelta
    delta_config.lazy_delta_base = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        delta_config.max_deltachain_span = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        data_config.mmap_index_threshold = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    data_config.with_sparse_read = withsparseread
    data_config.sr_density_threshold = srdensitythres
    data_config.sr_min_gap_size = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    delta_config.sparse_revlog = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True
        data_config.with_sparse_read = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        delta_config.max_chain_len = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            feature_config.compression_engine = r.split(b'-', 2)[2]

    zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
    if zlib_level is not None:
        if not (0 <= zlib_level <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % zlib_level)
        feature_config.compression_engine_options[b'zlib.level'] = zlib_level
    zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
    if zstd_level is not None:
        if not (0 <= zstd_level <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % zstd_level)
        feature_config.compression_engine_options[b'zstd.level'] = zstd_level

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        feature_config.enable_ellipsis = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


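# Illustrative shape of the returned options for a typical modern repo
# (values abridged and hypothetical; the actual keys depend on the
# requirements and config resolved above):
#
#     {
#         b'revlogv1': True,
#         b'generaldelta': True,
#         b'issue6528.fix-incoming': True,
#         b'feature-config': <revlog.FeatureConfig>,
#         b'data-config': <revlog.DataConfig>,      # sparse-read settings
#         b'delta-config': <revlog.DeltaConfig>,    # delta chain settings
#         ...
#     }

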
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


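# Rough sketch of how ``makelocalrepository()`` (defined elsewhere in this
# module) consumes the list above: each factory is resolved through its
# lambda, called to obtain a base class, and the final repository type is
# derived from the collected bases (simplified; the real call passes much
# more state so extensions can customize the bases):
#
#     bases = [fn()(requirements=requirements, features=features)
#              for _iface, fn in REPO_INTERFACES]
#     cls = type('derivedrepo', tuple(bases), {})

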
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if hasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not hasattr(repo, '_wlockref')
                or not hasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

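    # Example of what the ward above catches (illustrative, abridged output;
    # emitted only when ``devel.check-locks`` or ``devel.all-warnings`` is
    # set): writing a lock-covered file such as a journal file without
    # holding the repo lock produces a devel warning along the lines of
    #
    #     devel-warn: write with no lock: "journal.dirstate"
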
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not hasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

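    # Common view names, for orientation (see ``repoview.filtertable`` for
    # the authoritative list; the calls below are illustrative only):
    #
    #     repo.filtered(b'visible')  # hides hidden (e.g. obsolete) changesets
    #     repo.filtered(b'served')   # also hides secret changesets
    #
    # Each call returns a fresh one-level repoview over the unfiltered repo.
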
1708 @mixedrepostorecache(
1708 @mixedrepostorecache(
1709 (b'bookmarks', b'plain'),
1709 (b'bookmarks', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1711 (b'bookmarks', b''),
1711 (b'bookmarks', b''),
1712 (b'00changelog.i', b''),
1712 (b'00changelog.i', b''),
1713 )
1713 )
1714 def _bookmarks(self):
1714 def _bookmarks(self):
1715 # Since the multiple files involved in the transaction cannot be
1715 # Since the multiple files involved in the transaction cannot be
        # written atomically (with the current repository format), there is a
        # race condition here.
        #
        # 1) changelog content A is read
        # 2) outside the transaction, the changelog is updated to content B
        # 3) outside the transaction, the bookmark file is updated to refer
        #    to content B
        # 4) the bookmarks file content is read and filtered against
        #    changelog A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock-time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) the filecache logic detects that `_bookmarks` needs to be
        #    computed
        # 2) cachestats for `bookmarks` and `changelog` are captured (for
        #    bookmarks)
        # 3) we force the `changelog` filecache to be tested
        # 4) a cachestat for `changelog` is captured (for the changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the
        #   data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
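
    # A minimal sketch of the read-then-lock pattern described above
    # (illustrative only, not part of the API contract):
    #
    #   marks = repo._bookmarks        # (1) cached read, may be stale
    #   with repo.lock():              # taking the lock invalidates the
    #       marks = repo._bookmarks    # filecaches, so this re-reads the
    #                                  # data if the files changed on disk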

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the
            # lock, the invalidation at that time should have taken care of
            # this and something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
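
    # Illustrative sketch: intersecting a user-supplied matcher with the
    # narrowspec before testing paths (the pattern and path are examples):
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   narrowed = repo.narrowmatch(m, includeexact=True)
    #   if narrowed(b'src/main.py'):
    #       ...  # path is within both the user match and the narrowspec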

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away, without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous, so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
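
    # Illustrative usage of the lookups above (assuming an open `repo`
    # object; the values are examples only):
    #
    #   repo[None]     # working directory context
    #   repo[0]        # changectx for revision number 0
    #   repo[b'tip']   # changectx for the tip changeset
    #   repo[b'.']     # changectx for the working directory's first parent
    #   repo[0:3]      # list of changectx, filtered revisions excluded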

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
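
    # A usage sketch for ``revs()`` (the revision number is an example):
    #
    #   for rev in repo.revs(b'ancestors(%d) and not public()', 42):
    #       ...
    #
    # The %-escapes are handled by ``revsetlang.formatspec``; for instance,
    # ``%d`` formats an integer revision and ``%s`` a bytes string.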

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
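
    # A sketch of ``anyrevs()`` with a local alias overriding user aliases
    # (the alias name and definition are illustrative):
    #
    #   aliases = {b'mine': b'author(alice)'}
    #   revs = repo.anyrevs(
    #       [b'mine and draft()'], user=True, localalias=aliases
    #   )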

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions
        likely won't call this unless they have registered a custom hook or
        are replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global'
                # or 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. We have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing
        branch (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
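
    # e.g. (illustrative, following the behaviour above):
    #
    #   repo.branchtip(b'default')                      # tip node, or raises
    #   repo.branchtip(b'no-such', ignoremissing=True)  # falls through, None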

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or
        # nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
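
    # A hedged sketch of how such filters are configured (the pattern and
    # commands are illustrative; per the loop above, a command of "!"
    # disables a filter inherited from another config file):
    #
    #   [encode]
    #   **.txt = sed "s/\r$//"
    #
    #   [decode]
    #   **.txt = sed "s/$/\r/"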

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
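
    # Illustrative call (the file name and content are examples): writing an
    # executable script into the working directory, where ``b'x'`` requests
    # the executable bit and ``b'l'`` would create a symlink instead:
    #
    #   repo.wwrite(b'build.sh', b'#!/bin/sh\necho built\n', b'x')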

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
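
    # A minimal usage sketch for transactions (the description string is an
    # example; as enforced below, the caller must already hold the store
    # lock):
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-change') as tr:
    #           ...  # mutate the store; rolled back if an exception escapes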

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At this point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction
        #   needs to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic references between the repo and the
        # transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here, so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # to be available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks, and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )
2602
2602
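        # For illustration, a hedged sketch of a Python 'pretxnclose' hook
        # driven by the hookargs assembled above (module and config names are
        # hypothetical; a truthy return value from a pre* hook vetoes the
        # transaction, mirroring throw=True here):
        #
        #     # myhooks.py, enabled via:
        #     #   [hooks]
        #     #   pretxnclose.guard = python:myhooks.guard
        #     def guard(ui, repo, hooktype, txnname=None, **kwargs):
        #         if txnname == 'push' and ui.configbool(b'myhooks', b'freeze'):
        #             ui.warn(b'repository is frozen, rejecting push\n')
        #             return True  # abort the transaction
        #         return False
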
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when hooks run. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need for viable parents at
        # the end of the operation. So back up viable parents at the time of
        # this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying a logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are append-only
                    # and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr

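    # A minimal sketch of how callers typically drive the transaction built
    # above, assuming `repo` was opened via mercurial.hg.repository() (the
    # path is hypothetical); wlock is taken before lock, per the ordering
    # rule documented on lock()/wlock() below:
    #
    #     from mercurial import hg, ui as uimod
    #
    #     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #     with repo.wlock(), repo.lock(), repo.transaction(b'my-change'):
    #         pass  # mutate the store; on normal exit the validator runs the
    #               # pretxnclose* hooks, then txnclose* hooks fire after the
    #               # outermost lock is released
    #     # an exception inside the block aborts the journal and fires 'txnabort'
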
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

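    # Roughly how the two entry points above map onto the CLI, assuming an
    # open `repo` (a sketch, not a verbatim transcript of the commands):
    #
    #     repo.recover()              # `hg recover`: undo a journal left over
    #                                 # from an interrupted transaction
    #     repo.rollback(dryrun=True)  # `hg rollback -n`: preview undoing the
    #                                 # last transaction recorded in 'undo'
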
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

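    # A sketch of the extension hook-point the docstring above mentions:
    # wrapping _buildcacheupdater lets an extension piggyback its own cache
    # refresh on the same post-close callback (wrapper and helper names are
    # hypothetical, and so is the assumption that wrapfunction is applied to
    # the localrepository class here):
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def extended(tr):
    #             updater(tr)
    #             refresh_my_cache(repo, tr)  # hypothetical extension cache
    #
    #         return extended
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(
    #             localrepo.localrepository, '_buildcacheupdater', _wrapped
    #         )
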
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone, and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this in 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
-               # flush all possibly delayed write.
-               self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing the fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered)

+       # flush all possibly delayed write.
+       self._branchcaches.write_delayed(self)
+
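    # With write_delayed() moved here, delayed branchmap writes are flushed
    # unconditionally, whichever cache families were warmed above. Warming
    # everything explicitly is roughly what `hg debugupdatecaches` amounts
    # to, assuming an open `repo` (sketch):
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(caches=repository.CACHES_ALL)
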
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

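    # A minimal sketch of driving commit() programmatically, assuming an open
    # `repo` with pending working-directory changes (message and user are
    # made up):
    #
    #     node = repo.commit(
    #         text=b'example: update docs',
    #         user=b'Jane Doe <jane@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing to commit\n')
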
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

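    # A sketch of an extension using the post-dirstate-status machinery
    # above, re-registering on every status call as the docstring requires
    # (the callback name is hypothetical):
    #
    #     def _fixup(wctx, status):
    #         # runs under wlock right after status fixups; consult the live
    #         # dirstate via wctx.repo().dirstate, never a cached copy
    #         ds = wctx.repo().dirstate
    #         # ... inspect `status` and adjust `ds` here ...
    #
    #     repo.addpostdsstatus(_fixup)
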
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

3444 def branchheads(self, branch=None, start=None, closed=False):
3445 def branchheads(self, branch=None, start=None, closed=False):
3445 """return a (possibly filtered) list of heads for the given branch
3446 """return a (possibly filtered) list of heads for the given branch
3446
3447
3447 Heads are returned in topological order, from newest to oldest.
3448 Heads are returned in topological order, from newest to oldest.
3448 If branch is None, use the dirstate branch.
3449 If branch is None, use the dirstate branch.
3449 If start is not None, return only heads reachable from start.
3450 If start is not None, return only heads reachable from start.
3450 If closed is True, return heads that are marked as closed as well.
3451 If closed is True, return heads that are marked as closed as well.
3451 """
3452 """
3452 if branch is None:
3453 if branch is None:
3453 branch = self[None].branch()
3454 branch = self[None].branch()
3454 branches = self.branchmap()
3455 branches = self.branchmap()
3455 if not branches.hasbranch(branch):
3456 if not branches.hasbranch(branch):
3456 return []
3457 return []
3457 # the cache returns heads ordered lowest to highest
3458 # the cache returns heads ordered lowest to highest
3458 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 if start is not None:
3460 if start is not None:
3460 # filter out the heads that cannot be reached from startrev
3461 # filter out the heads that cannot be reached from startrev
3461 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 bheads = [h for h in bheads if h in fbheads]
3463 bheads = [h for h in bheads if h in fbheads]
3463 return bheads
3464 return bheads
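
    # Illustrative sketch (not part of the module): both head queries return
    # newest-first lists of binary nodes, e.g.
    #
    #     repo.heads()                  # every head in the repository
    #     repo.branchheads(b'default')  # open heads of the 'default' branch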

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
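
    # Illustrative note (not part of the module): for each requested node the
    # loop above follows first parents until it reaches a merge or a root and
    # records (requested node, end of the linear run, p1, p2) for that run.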

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
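
    # Illustrative note (not part of the module): for each (top, bottom) pair
    # the loop walks the first-parent chain down from top and samples it at
    # exponentially growing distances, recording the nodes 1, 2, 4, 8, ...
    # steps below top (historically used by the old wire-protocol discovery
    # to describe long chains compactly).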

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
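
    # Illustrative sketch (not part of the module; the hex nodes are
    # hypothetical): bookmarks are one pushkey namespace, so a bookmark move
    # can be expressed as
    #
    #     repo.pushkey(b'bookmarks', b'@', old_hex_node, new_hex_node)
    #
    # with the prepushkey/pushkey hooks fired around the actual update.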

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
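
    # Illustrative sketch (not part of the module; ``my_computer`` and
    # ``my_flags`` are hypothetical): an extension wanting extra sidedata
    # pairs the two calls above, e.g.
    #
    #     repo.register_wanted_sidedata(b'my-category')
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'my-category',
    #         (b'my-key',),
    #         my_computer,
    #         my_flags,
    #     )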


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
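
# Illustrative example (not part of the module): undoname() rewrites only the
# basename prefix, e.g. b'store/journal.phaseroots' -> b'store/undo.phaseroots'.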


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
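

# Illustrative example (not part of the module): with a stock configuration
# plus `format.use-share-safe = yes`, the set returned above would typically
# contain entries such as b'revlogv1', b'store', b'fncache', b'dotencode',
# b'generaldelta', b'sparserevlog' and b'share-safe', which end up in the
# new repository's requires file(s).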


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
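

# Illustrative example (not part of the module; b'frobnicate' is a made-up
# option): an unrecognized option is returned to the caller, which
# createrepository() below turns into an abort:
#
#     filterknowncreateopts(ui, {b'backend': b'revlogv1', b'frobnicate': True})
#     # -> {b'frobnicate': True}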


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not a
    # shared one, write the store requirements. For a new shared repository
    # we don't need to write them, as they are already present in the source
    # store's requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
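

# Illustrative sketch (not part of the module): `hg init` ultimately funnels
# into this function; a direct call looks like
#
#     createrepository(ui, b'/path/to/new-repo')
#
# after which the directory holds .hg/ with a requires file and, for
# store-backed formats, an .hg/store/ directory.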


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups raise an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
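

# Illustrative note (not part of the module): after poisonrepository(repo),
# close() keeps working but any other attribute access raises:
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError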