safehasattr: pass attribute name as string instead of bytes...
marmoute
r51476:833a4e88 default
@@ -1,4026 +1,4026 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import re
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

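A quick sketch of what this pattern matches, assuming only the standard `re` module; the sample file names below are illustrative:

import re

pattern = re.compile(b"^((dirstate|narrowspec.dirstate).*|branch$)")

# dirstate-related journal entries and the branch file are matched
for name in (b'dirstate', b'narrowspec.dirstate.pending', b'branch'):
    assert pattern.match(name)

# ordinary store files are not
for name in (b'00changelog.i', b'branchmap-served'):
    assert not pattern.match(name)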
# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


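For illustration, a minimal sketch of the (cachedobj-or-None, iscached) contract, using a toy stand-in object rather than a real repo; all names below are made up:

class _toyentry:
    def __init__(self, obj):
        self.obj = obj


class _toyrepo:
    def __init__(self):
        self._filecache = {}

    def unfiltered(self):
        return self


repo = _toyrepo()
print(isfilecached(repo, b'changelog'))   # (None, False): nothing cached yet
repo._filecache[b'changelog'] = _toyentry(b'<changelog object>')
print(isfilecached(repo, b'changelog'))   # (b'<changelog object>', True)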
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


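A self-contained sketch of how the decorator above behaves, using a toy class (names invented for illustration):

class toyrepo:
    def __init__(self, filtername=None):
        self.filtername = filtername

    def unfiltered(self):
        # a filtered view hands back an unfiltered copy of itself
        return toyrepo(None) if self.filtername else self

    @unfilteredmethod
    def describe(self):
        return self.filtername or 'unfiltered'

print(toyrepo('visible').describe())   # 'unfiltered': ran on the unfiltered copy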
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


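A hedged sketch of how calling code drives this executor; it assumes the current working directory holds a Mercurial repository:

from mercurial import hg, ui as uimod
from mercurial.node import short

myui = uimod.ui.load()
repo = hg.repository(myui, b'.')        # assumes cwd is a repository
with repo.peer().commandexecutor() as e:
    f = e.callcommand(b'heads', {})     # returns an already-resolved future
    e.sendcommands()                    # a no-op for local peers, but part of the API
print([short(n) for n in f.result()])   # result() raises here on error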
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
        super(localpeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
-                if util.safehasattr(ret, b'getchunks'):
+                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


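For context, this is the registration pattern extensions use (a sketch; the requirement name is invented):

from mercurial import localrepo

def featuresetup(ui, features):
    # advertise that this extension can open repos with this requirement
    features.add(b'exp-my-custom-requirement')

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)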
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())


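A plain-filesystem approximation of the helper above, handy for poking at a repo outside of Mercurial; `read_requires` is a hypothetical name, not part of this module:

import os

def read_requires(hgdir, allowmissing=True):
    """Read <hgdir>/requires and return its entries as a set of bytes."""
    try:
        with open(os.path.join(hgdir, 'requires'), 'rb') as fh:
            return set(fh.read().splitlines())
    except FileNotFoundError:
        if allowmissing:
            return set()
        raise

# e.g. read_requires('.hg') -> {b'store', b'revlogv1', b'fncache', ...}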
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; see checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


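The `type()` trick used above, in miniature (stand-in aspect classes with invented names):

class storageaspect:
    def describe(self):
        return 'storage'

class cacheaspect:
    pass

bases = [cacheaspect, storageaspect]
# the name need not be a valid Python identifier, just like above
cls = type('derivedrepo:/tmp/demo<revlogv1,store>', tuple(bases), {})
obj = cls()
print(type(obj).__name__)   # derivedrepo:/tmp/demo<revlogv1,store>
print(obj.describe())       # storage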
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


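A sketch of the monkeypatching the docstring suggests, using the stock `extensions.wrapfunction` helper; the extra config file name is invented:

from mercurial import extensions, localrepo

def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
    ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
    try:
        # pull in one extra per-repo file (illustrative name)
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)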
905 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
905 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
906 """Perform additional actions after .hg/hgrc is loaded.
906 """Perform additional actions after .hg/hgrc is loaded.
907
907
908 This function is called during repository loading immediately after
908 This function is called during repository loading immediately after
909 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
909 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
910
910
911 The function can be used to validate configs, automatically add
911 The function can be used to validate configs, automatically add
912 options (including extensions) based on requirements, etc.
912 options (including extensions) based on requirements, etc.
913 """
913 """
914
914
915 # Map of requirements to list of extensions to load automatically when
915 # Map of requirements to list of extensions to load automatically when
916 # requirement is present.
916 # requirement is present.
917 autoextensions = {
917 autoextensions = {
918 b'git': [b'git'],
918 b'git': [b'git'],
919 b'largefiles': [b'largefiles'],
919 b'largefiles': [b'largefiles'],
920 b'lfs': [b'lfs'],
920 b'lfs': [b'lfs'],
921 }
921 }
922
922
923 for requirement, names in sorted(autoextensions.items()):
923 for requirement, names in sorted(autoextensions.items()):
924 if requirement not in requirements:
924 if requirement not in requirements:
925 continue
925 continue
926
926
927 for name in names:
927 for name in names:
928 if not ui.hasconfig(b'extensions', name):
928 if not ui.hasconfig(b'extensions', name):
929 ui.setconfig(b'extensions', name, b'', source=b'autoload')
929 ui.setconfig(b'extensions', name, b'', source=b'autoload')
930
930
931
931
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


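# editor's note: illustrative sketch, not part of localrepo.py. It
# restates the derivation above with a plain data structure: every
# available engine that defines a revlog header yields an
# b'exp-compression-*' requirement, and zstd additionally yields the
# stable zstd requirement (passed in here rather than assumed).
def _sketch_derived_compression_requirements(engines, zstd_requirement):
    """``engines`` maps name -> (available, has_revlog_header)."""
    derived = set()
    for name, (available, has_header) in engines.items():
        if available and has_header:
            derived.add(b'exp-compression-%s' % name)
            if name == b'zstd':
                derived.add(zstd_requirement)
    return derived

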
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


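# editor's note: illustrative sketch, not part of localrepo.py. The
# corruption heuristic above distinguishes a broken .hg/requires file
# from a merely unknown requirement: a valid requirement line must start
# with an alphanumeric byte.
def _sketch_requirement_line_is_wellformed(requirement):
    return bool(requirement) and requirement[0:1].isalnum()


# e.g. _sketch_requirement_line_is_wellformed(b'revlogv1') is True, while
# b'' or b'\x00junk' indicates corruption rather than a missing feature.

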
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


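# editor's note: illustrative sketch, not part of localrepo.py. It
# mirrors makestore()'s decision tree with plain booleans instead of
# requirement-set membership tests.
def _sketch_pick_store(has_store, has_fncache, has_dotencode):
    if has_store:
        if has_fncache:
            # fncachestore; dotencode further tweaks path encoding
            return b'fncachestore', has_dotencode
        return b'encodedstore', None
    return b'basicstore', None

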
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


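# editor's note: illustrative sketch, not part of localrepo.py. It
# isolates the copies-storage resolution above: the sidedata requirement
# wins outright; otherwise the experimental copies.write-to config can
# route copy metadata into changeset extras; anything else keeps the
# default filelog-based storage (represented here as None).
def _sketch_copies_storage(has_sidedata_requirement, write_copies_to):
    if has_sidedata_requirement:
        return b'changeset-sidedata'
    if write_copies_to in (b'changeset-only', b'compatibility'):
        return b'extra'
    return None

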
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


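# editor's note: illustrative sketch, not part of localrepo.py. It
# isolates the compression-engine scan above: both the current
# b'revlog-compression-' prefix and the legacy b'exp-compression-'
# prefix are honored, and when several such requirements coexist the
# last one iterated wins.
def _sketch_compengine_from_requirements(requirements):
    engine = None
    for r in requirements:
        if r.startswith(b'revlog-compression-') or r.startswith(
            b'exp-compression-'
        ):
            engine = r.split(b'-', 2)[2]
    return engine


# e.g. {b'revlog-compression-zstd'} resolves to b'zstd'.

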
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


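# editor's note: illustrative sketch, not part of localrepo.py, of how a
# final repository type can be derived from REPO_INTERFACES: call each
# factory to obtain a base class, then build one type inheriting from all
# of them. makelocalrepository() performs a more elaborate version of
# this; the argument handling here is simplified and hypothetical.
def _sketch_compose_repo_type(factories, requirements, features):
    bases = tuple(
        factory()(requirements=requirements, features=features)
        for _interface, factory in factories
    )
    return type('derivedrepo', bases, {})

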
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

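    # editor's note: illustrative sketch (commented out, not part of
    # localrepo.py) of the ward pattern shared by _getvfsward and
    # _getsvfsward: wrap a function while holding only a weak reference
    # to the owner so the wrapper never keeps the repository alive, and
    # skip the extra checks once the owner has been garbage collected.
    #
    #   def _sketch_ward(owner, func, check):
    #       ref = weakref.ref(owner)
    #
    #       def wrapper(*args, **kwargs):
    #           ret = func(*args, **kwargs)
    #           target = ref()
    #           if target is not None:
    #               check(target, *args, **kwargs)
    #           return ret
    #
    #       return wrapper
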
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

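    # editor's note: illustrative sketch (commented out, not part of
    # localrepo.py) of the prefix walk above: starting from the full
    # path, trailing components are dropped until a prefix that is a
    # subrepository (a key of ``substate``) is found, or the walk gives
    # up.
    #
    #   def _sketch_longest_subrepo_prefix(parts, substate):
    #       parts = list(parts)
    #       while parts:
    #           prefix = b'/'.join(parts)
    #           if prefix in substate:
    #               return prefix
    #           parts.pop()
    #       return None
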
    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

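    # editor's note (hypothetical session, not part of localrepo.py):
    # because filtering is non-recursive, chaining views replaces rather
    # than nests the filter:
    #
    #   served = repo.filtered(b'served')
    #   visible = served.filtered(b'visible')  # a 'visible' view of repo,
    #                                          # not a view of 'served'
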
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

1753 def _refreshchangelog(self):
1753 def _refreshchangelog(self):
1754 """make sure the in memory changelog match the on-disk one"""
1754 """make sure the in memory changelog match the on-disk one"""
1755 if 'changelog' in vars(self) and self.currenttransaction() is None:
1755 if 'changelog' in vars(self) and self.currenttransaction() is None:
1756 del self.changelog
1756 del self.changelog
1757
1757
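    # A hedged sketch of the read/lock/invalidate pattern the comment above
    # relies on; `read_caches` is a hypothetical stand-in for the filecache
    # reads, not an actual Mercurial API:
    #
    #     data = read_caches()      # unlocked reads, possibly racing a writer
    #     with repo.lock():
    #         repo.invalidate()     # "light" invalidation: filecache keeps
    #                               # the parsed data and revalidates it
    #                               # against the files' stat information
    #         data = read_caches()  # reused as-is if the files are unchanged
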
    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

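    # Hypothetical usage sketch (not from the original file): intersecting a
    # caller-provided matcher with the narrowspec while keeping explicitly
    # listed paths visible so they can be warned about later:
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     narrowed = repo.narrowmatch(m, includeexact=True)
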
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away, without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

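    # Illustrative shape of the quick-access mapping (hedged; `node42` is a
    # placeholder binary node): revision numbers and binary nodes both key
    # the same (rev, node) pair, so lookups like repo[b'.'] or repo[42] can
    # skip the general resolution path:
    #
    #     {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #      42: (42, node42), node42: (42, node42), b'.': (42, node42)}
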
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous, so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

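    # Hedged usage sketch of the lookup forms handled above (the short hash
    # is a placeholder):
    #
    #     repo[None]     # context.workingctx
    #     repo[b'.']     # first parent of the working directory
    #     repo[b'tip']   # tip of the (possibly filtered) changelog
    #     repo[0]        # changectx for revision 0
    #     repo[b'25c2ef5b4bd5']  # ProgrammingError: only full-length binary
    #                            # or hex nodes reach this method; short
    #                            # prefixes go through scmutil.revsymbol()
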
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

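    # Hedged examples of the %-formatting mentioned above; see
    # ``revsetlang.formatspec`` for the full list of escapes:
    #
    #     repo.revs(b'heads(%ld)', revlist)     # %ld: list of int revisions
    #     repo.revs(b'branch(%s)', b'default')  # %s: a bytes string
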
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

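    # Hedged usage sketch: iterating contexts for a revset:
    #
    #     for ctx in repo.set(b'draft()'):
    #         repo.ui.write(b'%s\n' % ctx.hex())
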
    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

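    # Hedged example of the ``localalias`` override described in the
    # docstring ("alice" is a placeholder user name):
    #
    #     revs = repo.anyrevs(
    #         [b'not public() and mine'],
    #         user=True,
    #         localalias={b'mine': b'user("alice")'},
    #     )
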
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

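    # Hedged example: firing a hypothetical custom hook; keyword arguments
    # are exposed to external hooks as HG_* environment variables:
    #
    #     repo.hook(b'myext-updated', throw=False, newlen=len(repo))
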
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. We have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # the local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

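    # Hedged sketch of the returned mapping's shape (branch names and nodes
    # are placeholders; the actual object is a branch cache that behaves
    # like a dictionary):
    #
    #     {b'default': [node_a, node_b], b'stable': [node_c]}
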
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

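    # Hedged example of the hgrc configuration these two properties read
    # (an [encode]/[decode] filter pair; the pattern and commands are
    # illustrative only):
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #     [decode]
    #     **.txt = tempfile: dos2unix -n INFILE OUTFILE
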
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

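    # Hedged usage sketch: ``flags`` may contain b'l' (symlink) and/or b'x'
    # (executable), as reported by a filectx's flags():
    #
    #     repo.wwrite(b'bin/run.sh', script_data, b'x')  # executable file
    #     repo.wwrite(b'link-name', b'target', b'l')     # symlink
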
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic references between the repo and the transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here, so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is
                # being called from transaction.__del__), there's not much we
                # can do, so just leave the unfinished transaction there and
                # let the user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
2636 tr.addfinalize(b'flush-fncache', self.store.write)
2636 tr.addfinalize(b'flush-fncache', self.store.write)
2637
2637
2638 def txnclosehook(tr2):
2638 def txnclosehook(tr2):
2639 """To be run if transaction is successful, will schedule a hook run"""
2639 """To be run if transaction is successful, will schedule a hook run"""
2640 # Don't reference tr2 in hook() so we don't hold a reference.
2640 # Don't reference tr2 in hook() so we don't hold a reference.
2641 # This reduces memory consumption when there are multiple
2641 # This reduces memory consumption when there are multiple
2642 # transactions per lock. This can likely go away if issue5045
2642 # transactions per lock. This can likely go away if issue5045
2643 # fixes the function accumulation.
2643 # fixes the function accumulation.
2644 hookargs = tr2.hookargs
2644 hookargs = tr2.hookargs
2645
2645
2646 def hookfunc(unused_success):
2646 def hookfunc(unused_success):
2647 repo = reporef()
2647 repo = reporef()
2648 assert repo is not None # help pytype
2648 assert repo is not None # help pytype
2649
2649
2650 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2650 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2651 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2651 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2652 for name, (old, new) in bmchanges:
2652 for name, (old, new) in bmchanges:
2653 args = tr.hookargs.copy()
2653 args = tr.hookargs.copy()
2654 args.update(bookmarks.preparehookargs(name, old, new))
2654 args.update(bookmarks.preparehookargs(name, old, new))
2655 repo.hook(
2655 repo.hook(
2656 b'txnclose-bookmark',
2656 b'txnclose-bookmark',
2657 throw=False,
2657 throw=False,
2658 **pycompat.strkwargs(args)
2658 **pycompat.strkwargs(args)
2659 )
2659 )
2660
2660
2661 if hook.hashook(repo.ui, b'txnclose-phase'):
2661 if hook.hashook(repo.ui, b'txnclose-phase'):
2662 cl = repo.unfiltered().changelog
2662 cl = repo.unfiltered().changelog
2663 phasemv = sorted(
2663 phasemv = sorted(
2664 tr.changes[b'phases'], key=lambda r: r[0][0]
2664 tr.changes[b'phases'], key=lambda r: r[0][0]
2665 )
2665 )
2666 for revs, (old, new) in phasemv:
2666 for revs, (old, new) in phasemv:
2667 for rev in revs:
2667 for rev in revs:
2668 args = tr.hookargs.copy()
2668 args = tr.hookargs.copy()
2669 node = hex(cl.node(rev))
2669 node = hex(cl.node(rev))
2670 args.update(phases.preparehookargs(node, old, new))
2670 args.update(phases.preparehookargs(node, old, new))
2671 repo.hook(
2671 repo.hook(
2672 b'txnclose-phase',
2672 b'txnclose-phase',
2673 throw=False,
2673 throw=False,
2674 **pycompat.strkwargs(args)
2674 **pycompat.strkwargs(args)
2675 )
2675 )
2676
2676
2677 repo.hook(
2677 repo.hook(
2678 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2678 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2679 )
2679 )
2680
2680
2681 repo = reporef()
2681 repo = reporef()
2682 assert repo is not None # help pytype
2682 assert repo is not None # help pytype
2683 repo._afterlock(hookfunc)
2683 repo._afterlock(hookfunc)
2684
2684
2685 tr.addfinalize(b'txnclose-hook', txnclosehook)
2685 tr.addfinalize(b'txnclose-hook', txnclosehook)
2686 # Include a leading "-" to make it happen before the transaction summary
2686 # Include a leading "-" to make it happen before the transaction summary
2687 # reports registered via scmutil.registersummarycallback() whose names
2687 # reports registered via scmutil.registersummarycallback() whose names
2688 # are 00-txnreport etc. That way, the caches will be warm when the
2688 # are 00-txnreport etc. That way, the caches will be warm when the
2689 # callbacks run.
2689 # callbacks run.
2690 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2690 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2691
2691
2692 def txnaborthook(tr2):
2692 def txnaborthook(tr2):
2693 """To be run if transaction is aborted"""
2693 """To be run if transaction is aborted"""
2694 repo = reporef()
2694 repo = reporef()
2695 assert repo is not None # help pytype
2695 assert repo is not None # help pytype
2696 repo.hook(
2696 repo.hook(
2697 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2697 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2698 )
2698 )
2699
2699
2700 tr.addabort(b'txnabort-hook', txnaborthook)
2700 tr.addabort(b'txnabort-hook', txnaborthook)
2701 # avoid eager cache invalidation. in-memory data should be identical
2701 # avoid eager cache invalidation. in-memory data should be identical
2702 # to stored data if transaction has no error.
2702 # to stored data if transaction has no error.
2703 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2703 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2704 self._transref = weakref.ref(tr)
2704 self._transref = weakref.ref(tr)
2705 scmutil.registersummarycallback(self, tr, desc)
2705 scmutil.registersummarycallback(self, tr, desc)
2706 # This only exist to deal with the need of rollback to have viable
2706 # This only exist to deal with the need of rollback to have viable
2707 # parents at the end of the operation. So backup viable parents at the
2707 # parents at the end of the operation. So backup viable parents at the
2708 # time of this operation.
2708 # time of this operation.
2709 #
2709 #
2710 # We only do it when the `wlock` is taken, otherwise other might be
2710 # We only do it when the `wlock` is taken, otherwise other might be
2711 # altering the dirstate under us.
2711 # altering the dirstate under us.
2712 #
2712 #
2713 # This is really not a great way to do this (first, because we cannot
2713 # This is really not a great way to do this (first, because we cannot
2714 # always do it). There are more viable alternative that exists
2714 # always do it). There are more viable alternative that exists
2715 #
2715 #
2716 # - backing only the working copy parent in a dedicated files and doing
2716 # - backing only the working copy parent in a dedicated files and doing
2717 # a clean "keep-update" to them on `hg rollback`.
2717 # a clean "keep-update" to them on `hg rollback`.
2718 #
2718 #
2719 # - slightly changing the behavior an applying a logic similar to "hg
2719 # - slightly changing the behavior an applying a logic similar to "hg
2720 # strip" to pick a working copy destination on `hg rollback`
2720 # strip" to pick a working copy destination on `hg rollback`
2721 if self.currentwlock() is not None:
2721 if self.currentwlock() is not None:
2722 ds = self.dirstate
2722 ds = self.dirstate
2723 if not self.vfs.exists(b'branch'):
2723 if not self.vfs.exists(b'branch'):
2724 # force a file to be written if None exist
2724 # force a file to be written if None exist
2725 ds.setbranch(b'default', None)
2725 ds.setbranch(b'default', None)
2726
2726
2727 def backup_dirstate(tr):
2727 def backup_dirstate(tr):
2728 for f in ds.all_file_names():
2728 for f in ds.all_file_names():
2729 # hardlink backup is okay because `dirstate` is always
2729 # hardlink backup is okay because `dirstate` is always
2730 # atomically written and possible data file are append only
2730 # atomically written and possible data file are append only
2731 # and resistant to trailing data.
2731 # and resistant to trailing data.
2732 tr.addbackup(f, hardlink=True, location=b'plain')
2732 tr.addbackup(f, hardlink=True, location=b'plain')
2733
2733
2734 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2734 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2735 return tr
2735 return tr
2736
2736
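    # A minimal usage sketch for the transaction machinery above
    # (illustrative only, not part of this module): callers take the locks
    # first, then open the transaction as a context manager, exactly as
    # commit() does further down with `with self.transaction(b'commit')`.
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-change') as tr:
    #             ...  # mutate the store; tr.hookargs feeds the txn hooks
    #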
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

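    # Recovery sketch (illustrative): `hg recover` reduces to this call,
    # which rolls back the interrupted "journal" file checked for above.
    #
    #     if repo.svfs.exists(b"journal"):
    #         repo.recover()  # True once the journal has been rolled back
    #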
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

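    # Lock-ordering sketch mirroring rollback() above: 'wlock' is always
    # taken before 'lock', per the dead-lock warning on lock()/wlock()
    # below, and released in reverse order.
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...  # work touching both the store and the working copy
    #     finally:
    #         release(lock, wlock)
    #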
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to
                # drop the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

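    # Format sketch of the 'undo.desc' file parsed by _rollback() above
    # (written by _writejournal() below as b"%d\n%s\n"): the repository
    # length before the transaction, the transaction name, and an optional
    # third detail line, e.g.:
    #
    #     42
    #     commit
    #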
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

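    # The weakref pattern used above, in isolation (a sketch): holding only
    # a weak reference lets the repository be garbage collected even while
    # the transaction keeps the updater callback alive.
    #
    #     reporef = weakref.ref(repo)
    #
    #     def updater(tr):
    #         repo = reporef()  # None if the repo has been collected
    #         if repo is not None:
    #             repo.updatecaches(tr)
    #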
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction close, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others.
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed writes.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing the fnode cache warms it
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

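    # Usage sketch for the `caches` argument above (illustrative): pass one
    # of the predefined sets from the repository interface module rather
    # than the deprecated `full` flag.
    #
    #     repo.updatecaches(caches=repository.CACHES_DEFAULT)
    #     repo.updatecaches(tr, caches=repository.CACHES_ALL)
    #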
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

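    # Quick map of the invalidation entry points above (a summary, not new
    # behavior): invalidatecaches() drops derived caches, invalidate()
    # additionally drops filecache'd store data, invalidatedirstate() drops
    # the cached dirstate object, and invalidateall() combines the last two.
    #
    #     repo.invalidate(clearfilecache=True)  # as releasefn() does on abort
    #     repo.invalidateall()                  # pick up outside changes
    #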
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

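    # Registration sketch (illustrative): _afterlock() is how commit() and
    # the txnclose hooks above defer work until every lock is released.
    #
    #     def on_fully_unlocked(success):
    #         ...  # runs after the outermost lock is released
    #
    #     repo._afterlock(on_fully_unlocked)  # called right away if unlocked
    #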
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

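    # Usage sketch (illustrative): pair the acquisition with try/finally so
    # the store lock cannot leak on errors.
    #
    #     l = repo.lock()
    #     try:
    #         ...  # modify the store
    #     finally:
    #         l.release()
    #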
    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

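    # Checking sketch (illustrative): the current*lock() helpers return the
    # live lock or None, which is how transaction() above decides whether
    # the dirstate may safely be backed up.
    #
    #     if repo.currentwlock() is not None:
    #         ...  # the working copy is locked by us
    #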
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

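    # Usage sketch (illustrative): a minimal programmatic commit; the new
    # node is returned, or None when there was nothing to commit.
    #
    #     node = repo.commit(text=b'fix a bug', user=b'me <me@example.com>')
    #     if node is None:
    #         ...  # empty commit was skipped
    #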
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

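    # Registration sketch (illustrative): an extension hooking the status
    # fixup point documented in addpostdsstatus() above.
    #
    #     def fixup(wctx, status):
    #         ...  # runs under wlock; use wctx.repo().dirstate, not a copy
    #
    #     repo.addpostdsstatus(fixup)
    #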
3422 def postdsstatus(self):
3422 def postdsstatus(self):
3423 """Used by workingctx to get the list of post-dirstate-status hooks."""
3423 """Used by workingctx to get the list of post-dirstate-status hooks."""
3424 return self._postdsstatus
3424 return self._postdsstatus
3425
3425
3426 def clearpostdsstatus(self):
3426 def clearpostdsstatus(self):
3427 """Used by workingctx to clear post-dirstate-status hooks."""
3427 """Used by workingctx to clear post-dirstate-status hooks."""
3428 del self._postdsstatus[:]
3428 del self._postdsstatus[:]
3429
3429
3430 def heads(self, start=None):
3430 def heads(self, start=None):
3431 if start is None:
3431 if start is None:
3432 cl = self.changelog
3432 cl = self.changelog
3433 headrevs = reversed(cl.headrevs())
3433 headrevs = reversed(cl.headrevs())
3434 return [cl.node(rev) for rev in headrevs]
3434 return [cl.node(rev) for rev in headrevs]
3435
3435
3436 heads = self.changelog.heads(start)
3436 heads = self.changelog.heads(start)
3437 # sort the output in rev descending order
3437 # sort the output in rev descending order
3438 return sorted(heads, key=self.changelog.rev, reverse=True)
3438 return sorted(heads, key=self.changelog.rev, reverse=True)
3439
3439
3440 def branchheads(self, branch=None, start=None, closed=False):
3440 def branchheads(self, branch=None, start=None, closed=False):
3441 """return a (possibly filtered) list of heads for the given branch
3441 """return a (possibly filtered) list of heads for the given branch
3442
3442
3443 Heads are returned in topological order, from newest to oldest.
3443 Heads are returned in topological order, from newest to oldest.
3444 If branch is None, use the dirstate branch.
3444 If branch is None, use the dirstate branch.
3445 If start is not None, return only heads reachable from start.
3445 If start is not None, return only heads reachable from start.
3446 If closed is True, return heads that are marked as closed as well.
3446 If closed is True, return heads that are marked as closed as well.
3447 """
3447 """
3448 if branch is None:
3448 if branch is None:
3449 branch = self[None].branch()
3449 branch = self[None].branch()
3450 branches = self.branchmap()
3450 branches = self.branchmap()
3451 if not branches.hasbranch(branch):
3451 if not branches.hasbranch(branch):
3452 return []
3452 return []
3453 # the cache returns heads ordered lowest to highest
3453 # the cache returns heads ordered lowest to highest
3454 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3454 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3455 if start is not None:
3455 if start is not None:
3456 # filter out the heads that cannot be reached from startrev
3456 # filter out the heads that cannot be reached from startrev
3457 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3457 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3458 bheads = [h for h in bheads if h in fbheads]
3458 bheads = [h for h in bheads if h in fbheads]
3459 return bheads
3459 return bheads
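
    # Illustrative call (editor's sketch; the branch name is a placeholder):
    # newest-first open heads of the default branch:
    #
    #     open_heads = repo.branchheads(b'default', closed=False)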

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
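
    # Editor's note: for each (top, bottom) pair, the loop above walks the
    # first-parent chain from ``top`` toward ``bottom`` and records the nodes
    # at exponentially growing distances (1, 2, 4, 8, ...), which is the
    # sampling the legacy "between" wire-protocol command expects.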

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called before pushing
        changesets, each receiving a pushop that exposes repo, remote, and
        outgoing.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
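
    # Hedged example (editor's addition; the hex node values are
    # placeholders): moving a bookmark through the generic pushkey layer:
    #
    #     ok = repo.pushkey(b'bookmarks', b'feature-x', old_hex, new_hex)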

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
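
    # Editor's sketch (category name and computer are hypothetical): an
    # extension could register a computer for a custom changelog sidedata
    # category; ``my_computer`` is assumed to follow the sidedata computer
    # calling convention used elsewhere in this code base.
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'exp-my-category',
    #         keys=(b'exp-my-category',),
    #         computer=my_computer,
    #         flags=0,
    #     )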


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
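
# Editor's note: undoname() maps a journal file to its undo counterpart,
# e.g. b'.hg/store/journal.phaseroots' -> b'.hg/store/undo.phaseroots'.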


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirement
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
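
# Editor's illustration (inferred from the branches above, not exhaustive):
# with stock defaults -- usestore, usefncache, dotencode, generaldelta and
# sparse-revlog enabled, zlib compression -- the returned set is roughly
# {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
# b'sparserevlog'}, plus entries for any further format.* options enabled
# in the user's configuration.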


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
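
# Editor's sketch of the wrapping pattern the docstring above describes
# (the option name and wrapper are hypothetical): an extension claiming a
# b'myopt' creation option would strip it from the unknown set:
#
#     def _filtered(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)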


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the source's store.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
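
# Editor's usage sketch (the path is a placeholder): create a repository
# with default options, then open it:
#
#     createrepository(ui, b'/path/to/newrepo')
#     repo = instance(ui, b'/path/to/newrepo', create=False)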


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
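
# Editor's note: after poisonrepository(repo), close() keeps working but any
# other attribute access fails, e.g.:
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError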