typing: make the localrepo classes known to pytype...
Matt Harbison
r52788:ee7e106b default
@@ -1,4037 +1,4091 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import functools
import os
import random
import re
import sys
import time
+import typing
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    policy,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


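# Illustrative sketch (assumed names, not part of this change): these
# decorators wrap properties on localrepository so the cached value is
# dropped when the backing file changes on disk.
#
#     @repofilecache(b'dirstate')
#     def dirstate(self):
#         return self._makedirstate()
#
#     @storecache(b'phaseroots')
#     def _phasecache(self):
#         return phases.phasecache(self, self._phasedefaults)
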
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


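# Hedged usage sketch, mirroring how localrepository declares its bookmarks
# property elsewhere in this module: the b'plain' entry is joined under
# .hg/ while the b'' entry is joined under .hg/store, matching the join()
# branches above.
#
#     @mixedrepostorecache((b'bookmarks', b'plain'), (b'00changelog.i', b''))
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
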
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


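# Hypothetical usage sketch: peek at a filecache-ed property without
# forcing it to be computed.
#
#     cl, cached = isfilecached(repo, b'changelog')
#     if cached:
#         ui.debug(b'changelog already loaded: %d revs\n' % len(cl))
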
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


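# Illustrative sketch: operations that must see revisions hidden by
# repoview filtering are declared on localrepository like this (the
# method body here is elided):
#
#     @unfilteredmethod
#     def destroyed(self):
#         # `self` is always the unfiltered repository here
#         ...
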
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


-@interfaceutil.implementer(repository.ipeercommandexecutor)
-class localcommandexecutor:
+class LocalCommandExecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


+localcommandexecutor = interfaceutil.implementer(
+    repository.ipeercommandexecutor
+)(LocalCommandExecutor)
+
+if typing.TYPE_CHECKING:
+    # Help pytype by hiding the interface stuff that confuses it.
+    localcommandexecutor = LocalCommandExecutor


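# The rebinding above is the heart of this change. At runtime,
# interfaceutil.implementer() returns a replacement class that pytype
# cannot see through; under typing.TYPE_CHECKING the module-level name is
# pointed back at the plain class, so the checker analyzes a real type
# while runtime behavior is unchanged. A minimal self-contained sketch of
# the idiom (all names here are illustrative):

def _declare(iface):
    # stand-in for interfaceutil.implementer(iface)
    def wrap(cls):
        return type(cls.__name__, (cls,), {'_iface': iface})
    return wrap

class _Impl:
    def run(self) -> int:
        return 0

_impl = _declare('iface')(_Impl)

if typing.TYPE_CHECKING:
    _impl = _Impl  # checkers analyze _Impl; runtime keeps the wrapper

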
-@interfaceutil.implementer(repository.ipeercommands)
-class localpeer(repository.peer):
+class LocalPeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
-        super(localpeer, self).__init__(
+        super(LocalPeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs,
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs,
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if hasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


-@interfaceutil.implementer(repository.ipeerlegacycommands)
-class locallegacypeer(localpeer):
+localpeer = interfaceutil.implementer(repository.ipeercommands)(LocalPeer)
+
+if typing.TYPE_CHECKING:
+    # Help pytype by hiding the interface stuff that confuses it.
+    localpeer = LocalPeer
+
+
+class LocalLegacyPeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
-        super(locallegacypeer, self).__init__(
+        super(LocalLegacyPeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


+locallegacypeer = interfaceutil.implementer(repository.ipeerlegacycommands)(
+    LocalLegacyPeer
+)
+
+if typing.TYPE_CHECKING:
+    # Help pytype by hiding the interface stuff that confuses it.
+    locallegacypeer = LocalLegacyPeer

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


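# Hedged extension-side sketch of the registration described above (the
# lfs extension uses a hook of this shape so repos carrying its
# requirement can be opened):
#
#     def featuresetup(ui, supported):
#         supported |= {b'lfs'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
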
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


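# Illustrative values for the two share flavors handled above:
#   {b'shared'}    -> .hg/sharedpath holds an absolute path to the source
#   {b'relshared'} -> .hg/sharedpath is resolved relative to this .hg/
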
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = scmutil.readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in scmutil.readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= scmutil.readrequires(storevfs, False)
    elif shared:
        sourcerequires = scmutil.readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates current repository
    # is a share and store exists in path mentioned in `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


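# Hedged extension-side sketch of the wrapping hook the docstring above
# invites; makefilestorage is one of the REPO_INTERFACES factories, and
# the pass-through wrapper (and its assumed signature) is illustrative:
#
#     from mercurial import extensions, localrepo
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         return cls  # an extension could return a subclass instead
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'makefilestorage', _makefilestorage
#         )
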
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


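# Hedged extension-side sketch of the monkeypatching the docstring
# invites; the extra config file name is hypothetical:
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kw):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements, *args, **kw)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
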
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


920 def gathersupportedrequirements(ui):
942 def gathersupportedrequirements(ui):
921 """Determine the complete set of recognized requirements."""
943 """Determine the complete set of recognized requirements."""
922 # Start with all requirements supported by this file.
944 # Start with all requirements supported by this file.
923 supported = set(localrepository._basesupported)
945 supported = set(localrepository._basesupported)
924
946
925 # Execute ``featuresetupfuncs`` entries if they belong to an extension
947 # Execute ``featuresetupfuncs`` entries if they belong to an extension
926 # relevant to this ui instance.
948 # relevant to this ui instance.
927 modules = {m.__name__ for n, m in extensions.extensions(ui)}
949 modules = {m.__name__ for n, m in extensions.extensions(ui)}
928
950
929 for fn in featuresetupfuncs:
951 for fn in featuresetupfuncs:
930 if fn.__module__ in modules:
952 if fn.__module__ in modules:
931 fn(ui, supported)
953 fn(ui, supported)
932
954
933 # Add derived requirements from registered compression engines.
955 # Add derived requirements from registered compression engines.
934 for name in util.compengines:
956 for name in util.compengines:
935 engine = util.compengines[name]
957 engine = util.compengines[name]
936 if engine.available() and engine.revlogheader():
958 if engine.available() and engine.revlogheader():
937 supported.add(b'exp-compression-%s' % name)
959 supported.add(b'exp-compression-%s' % name)
938 if engine.name() == b'zstd':
960 if engine.name() == b'zstd':
939 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
961 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
940
962
941 return supported
963 return supported
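
For context, ``featuresetupfuncs`` is the module-level set that extensions add to, and the ``fn.__module__ in modules`` test above ensures a hook only runs when its extension is enabled for this ``ui``. A minimal sketch of an extension advertising a custom requirement this way (the requirement name is invented):

from mercurial import localrepo

def featuresetup(ui, supported):
    # called from gathersupportedrequirements() when this extension is loaded
    supported.add(b'exp-myextension-storage')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
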
942
964
943
965
944 def ensurerequirementsrecognized(requirements, supported):
966 def ensurerequirementsrecognized(requirements, supported):
945 """Validate that a set of local requirements is recognized.
967 """Validate that a set of local requirements is recognized.
946
968
947 Receives a set of requirements. Raises an ``error.RepoError`` if there
969 Receives a set of requirements. Raises an ``error.RepoError`` if there
948 exists any requirement in that set that currently loaded code doesn't
970 exists any requirement in that set that currently loaded code doesn't
949 recognize.
971 recognize.
950
972
951 Returns nothing on success.
973 Returns nothing on success.
952 """
974 """
953 missing = set()
975 missing = set()
954
976
955 for requirement in requirements:
977 for requirement in requirements:
956 if requirement in supported:
978 if requirement in supported:
957 continue
979 continue
958
980
959 if not requirement or not requirement[0:1].isalnum():
981 if not requirement or not requirement[0:1].isalnum():
960 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
982 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
961
983
962 missing.add(requirement)
984 missing.add(requirement)
963
985
964 if missing:
986 if missing:
965 raise error.RequirementError(
987 raise error.RequirementError(
966 _(b'repository requires features unknown to this Mercurial: %s')
988 _(b'repository requires features unknown to this Mercurial: %s')
967 % b' '.join(sorted(missing)),
989 % b' '.join(sorted(missing)),
968 hint=_(
990 hint=_(
969 b'see https://mercurial-scm.org/wiki/MissingRequirement '
991 b'see https://mercurial-scm.org/wiki/MissingRequirement '
970 b'for more information'
992 b'for more information'
971 ),
993 ),
972 )
994 )
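
A hedged usage sketch; ``ui`` is assumed to be in scope and ``b'frobnicate'`` is an invented requirement that exercises the error path:

supported = gathersupportedrequirements(ui)
try:
    ensurerequirementsrecognized(
        {b'revlogv1', b'store', b'frobnicate'}, supported
    )
except error.RequirementError as exc:
    # the message lists the unknown requirements and carries the wiki hint
    ui.warn(b'%s\n' % stringutil.forcebytestr(exc))
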
973
995
974
996
975 def ensurerequirementscompatible(ui, requirements):
997 def ensurerequirementscompatible(ui, requirements):
976 """Validates that a set of recognized requirements is mutually compatible.
998 """Validates that a set of recognized requirements is mutually compatible.
977
999
978 Some requirements may not be compatible with others or require
1000 Some requirements may not be compatible with others or require
979 config options that aren't enabled. This function is called during
1001 config options that aren't enabled. This function is called during
980 repository opening to ensure that the set of requirements needed
1002 repository opening to ensure that the set of requirements needed
981 to open a repository is sane and compatible with config options.
1003 to open a repository is sane and compatible with config options.
982
1004
983 Extensions can monkeypatch this function to perform additional
1005 Extensions can monkeypatch this function to perform additional
984 checking.
1006 checking.
985
1007
986 ``error.RepoError`` should be raised on failure.
1008 ``error.RepoError`` should be raised on failure.
987 """
1009 """
988 if (
1010 if (
989 requirementsmod.SPARSE_REQUIREMENT in requirements
1011 requirementsmod.SPARSE_REQUIREMENT in requirements
990 and not sparse.enabled
1012 and not sparse.enabled
991 ):
1013 ):
992 raise error.RepoError(
1014 raise error.RepoError(
993 _(
1015 _(
994 b'repository is using the sparse feature, but '
1016 b'repository is using the sparse feature, but '
995 b'sparse is not enabled; enable the '
1017 b'sparse is not enabled; enable the '
996 b'"sparse" extension to access'
1018 b'"sparse" extension to access'
997 )
1019 )
998 )
1020 )
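
Since the docstring explicitly invites monkeypatching, here is one sketch of how an extension could layer an extra check on top using ``extensions.wrapfunction``; the requirement and config names are invented:

from mercurial import error, extensions, localrepo

def _checkcompat(orig, ui, requirements):
    orig(ui, requirements)
    enabled = ui.configbool(b'myext', b'enabled')
    if b'exp-myfeature' in requirements and not enabled:
        raise error.RepoError(b'repository needs the myext extension')

def uisetup(ui):
    extensions.wrapfunction(
        localrepo, 'ensurerequirementscompatible', _checkcompat
    )
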
999
1021
1000
1022
1001 def makestore(requirements, path, vfstype):
1023 def makestore(requirements, path, vfstype):
1002 """Construct a storage object for a repository."""
1024 """Construct a storage object for a repository."""
1003 if requirementsmod.STORE_REQUIREMENT in requirements:
1025 if requirementsmod.STORE_REQUIREMENT in requirements:
1004 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1026 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1005 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1027 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1006 return storemod.fncachestore(path, vfstype, dotencode)
1028 return storemod.fncachestore(path, vfstype, dotencode)
1007
1029
1008 return storemod.encodedstore(path, vfstype)
1030 return storemod.encodedstore(path, vfstype)
1009
1031
1010 return storemod.basicstore(path, vfstype)
1032 return storemod.basicstore(path, vfstype)
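
To summarize the branches above, the requirement-to-store mapping looks like this. The call below is a sketch only; it assumes constructing a store object does not touch the filesystem, and the path is illustrative:

from mercurial import store as storemod, vfs as vfsmod

# {store, fncache, dotencode} -> fncachestore with dot-encoding enabled
# {store}                     -> encodedstore
# {}                          -> basicstore (legacy layout without a store dir)
store = makestore(
    {b'store', b'fncache', b'dotencode'}, b'/tmp/repo/.hg', vfsmod.vfs
)
assert isinstance(store, storemod.fncachestore)
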
1011
1033
1012
1034
1013 def resolvestorevfsoptions(ui, requirements, features):
1035 def resolvestorevfsoptions(ui, requirements, features):
1014 """Resolve the options to pass to the store vfs opener.
1036 """Resolve the options to pass to the store vfs opener.
1015
1037
1016 The returned dict is used to influence behavior of the storage layer.
1038 The returned dict is used to influence behavior of the storage layer.
1017 """
1039 """
1018 options = {}
1040 options = {}
1019
1041
1020 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1042 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1021 options[b'treemanifest'] = True
1043 options[b'treemanifest'] = True
1022
1044
1023 # experimental config: format.manifestcachesize
1045 # experimental config: format.manifestcachesize
1024 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1046 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1025 if manifestcachesize is not None:
1047 if manifestcachesize is not None:
1026 options[b'manifestcachesize'] = manifestcachesize
1048 options[b'manifestcachesize'] = manifestcachesize
1027
1049
1028 # In the absence of another requirement superseding a revlog-related
1050 # In the absence of another requirement superseding a revlog-related
1029 # requirement, we have to assume the repo is using revlog version 0.
1051 # requirement, we have to assume the repo is using revlog version 0.
1030 # This revlog format is super old and we don't bother trying to parse
1052 # This revlog format is super old and we don't bother trying to parse
1031 # opener options for it because those options wouldn't do anything
1053 # opener options for it because those options wouldn't do anything
1032 # meaningful on such old repos.
1054 # meaningful on such old repos.
1033 if (
1055 if (
1034 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1056 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1035 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1057 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1036 ):
1058 ):
1037 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1059 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1038 else: # explicitly mark repo as using revlogv0
1060 else: # explicitly mark repo as using revlogv0
1039 options[b'revlogv0'] = True
1061 options[b'revlogv0'] = True
1040
1062
1041 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1063 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1042 options[b'copies-storage'] = b'changeset-sidedata'
1064 options[b'copies-storage'] = b'changeset-sidedata'
1043 else:
1065 else:
1044 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1066 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1045 copiesextramode = (b'changeset-only', b'compatibility')
1067 copiesextramode = (b'changeset-only', b'compatibility')
1046 if writecopiesto in copiesextramode:
1068 if writecopiesto in copiesextramode:
1047 options[b'copies-storage'] = b'extra'
1069 options[b'copies-storage'] = b'extra'
1048
1070
1049 return options
1071 return options
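
The copies-storage resolution above reduces to a small decision function. A standalone restatement for clarity; the requirement string should match ``COPIESSDC_REQUIREMENT``, but treat it as illustrative:

def copies_storage(requirements, writecopiesto):
    if b'exp-copies-sidedata-changeset' in requirements:
        return b'changeset-sidedata'
    if writecopiesto in (b'changeset-only', b'compatibility'):
        return b'extra'
    return None  # default: copy metadata stays in the filelogs

assert copies_storage(set(), b'changeset-only') == b'extra'
assert (
    copies_storage({b'exp-copies-sidedata-changeset'}, None)
    == b'changeset-sidedata'
)
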
1050
1072
1051
1073
1052 def resolverevlogstorevfsoptions(ui, requirements, features):
1074 def resolverevlogstorevfsoptions(ui, requirements, features):
1053 """Resolve opener options specific to revlogs."""
1075 """Resolve opener options specific to revlogs."""
1054
1076
1055 options = {}
1077 options = {}
1056 options[b'flagprocessors'] = {}
1078 options[b'flagprocessors'] = {}
1057
1079
1058 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1080 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1059 data_config = options[b'data-config'] = revlog.DataConfig()
1081 data_config = options[b'data-config'] = revlog.DataConfig()
1060 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1082 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1061
1083
1062 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1084 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1063 options[b'revlogv1'] = True
1085 options[b'revlogv1'] = True
1064 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1086 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1065 options[b'revlogv2'] = True
1087 options[b'revlogv2'] = True
1066 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1088 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1067 options[b'changelogv2'] = True
1089 options[b'changelogv2'] = True
1068 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1090 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1069 options[b'changelogv2.compute-rank'] = cmp_rank
1091 options[b'changelogv2.compute-rank'] = cmp_rank
1070
1092
1071 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1093 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1072 options[b'generaldelta'] = True
1094 options[b'generaldelta'] = True
1073
1095
1074 # experimental config: format.chunkcachesize
1096 # experimental config: format.chunkcachesize
1075 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1097 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1076 if chunkcachesize is not None:
1098 if chunkcachesize is not None:
1077 data_config.chunk_cache_size = chunkcachesize
1099 data_config.chunk_cache_size = chunkcachesize
1078
1100
1079 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1101 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1080 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1102 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1081 data_config.uncompressed_cache_count = 10_000
1103 data_config.uncompressed_cache_count = 10_000
1082 data_config.uncompressed_cache_factor = 4
1104 data_config.uncompressed_cache_factor = 4
1083 if memory_profile >= scmutil.RESOURCE_HIGH:
1105 if memory_profile >= scmutil.RESOURCE_HIGH:
1084 data_config.uncompressed_cache_factor = 10
1106 data_config.uncompressed_cache_factor = 10
1085
1107
1086 delta_config.delta_both_parents = ui.configbool(
1108 delta_config.delta_both_parents = ui.configbool(
1087 b'storage', b'revlog.optimize-delta-parent-choice'
1109 b'storage', b'revlog.optimize-delta-parent-choice'
1088 )
1110 )
1089 delta_config.candidate_group_chunk_size = ui.configint(
1111 delta_config.candidate_group_chunk_size = ui.configint(
1090 b'storage',
1112 b'storage',
1091 b'revlog.delta-parent-search.candidate-group-chunk-size',
1113 b'revlog.delta-parent-search.candidate-group-chunk-size',
1092 )
1114 )
1093 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1115 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1094
1116
1095 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1117 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1096 options[b'issue6528.fix-incoming'] = issue6528
1118 options[b'issue6528.fix-incoming'] = issue6528
1097
1119
1098 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1120 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1099 lazydeltabase = False
1121 lazydeltabase = False
1100 if lazydelta:
1122 if lazydelta:
1101 lazydeltabase = ui.configbool(
1123 lazydeltabase = ui.configbool(
1102 b'storage', b'revlog.reuse-external-delta-parent'
1124 b'storage', b'revlog.reuse-external-delta-parent'
1103 )
1125 )
1104 if lazydeltabase is None:
1126 if lazydeltabase is None:
1105 lazydeltabase = not scmutil.gddeltaconfig(ui)
1127 lazydeltabase = not scmutil.gddeltaconfig(ui)
1106 delta_config.lazy_delta = lazydelta
1128 delta_config.lazy_delta = lazydelta
1107 delta_config.lazy_delta_base = lazydeltabase
1129 delta_config.lazy_delta_base = lazydeltabase
1108
1130
1109 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1131 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1110 if 0 <= chainspan:
1132 if 0 <= chainspan:
1111 delta_config.max_deltachain_span = chainspan
1133 delta_config.max_deltachain_span = chainspan
1112
1134
1113 has_populate = util.has_mmap_populate()
1135 has_populate = util.has_mmap_populate()
1114 if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
1136 if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
1115 data_config.mmap_index_threshold = ui.configbytes(
1137 data_config.mmap_index_threshold = ui.configbytes(
1116 b'storage',
1138 b'storage',
1117 b'revlog.mmap.index:size-threshold',
1139 b'revlog.mmap.index:size-threshold',
1118 )
1140 )
1119
1141
1120 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1142 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1121 srdensitythres = float(
1143 srdensitythres = float(
1122 ui.config(b'experimental', b'sparse-read.density-threshold')
1144 ui.config(b'experimental', b'sparse-read.density-threshold')
1123 )
1145 )
1124 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1146 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1125 data_config.with_sparse_read = withsparseread
1147 data_config.with_sparse_read = withsparseread
1126 data_config.sr_density_threshold = srdensitythres
1148 data_config.sr_density_threshold = srdensitythres
1127 data_config.sr_min_gap_size = srmingapsize
1149 data_config.sr_min_gap_size = srmingapsize
1128
1150
1129 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1151 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1130 delta_config.sparse_revlog = sparserevlog
1152 delta_config.sparse_revlog = sparserevlog
1131 if sparserevlog:
1153 if sparserevlog:
1132 options[b'generaldelta'] = True
1154 options[b'generaldelta'] = True
1133 data_config.with_sparse_read = True
1155 data_config.with_sparse_read = True
1134
1156
1135 maxchainlen = None
1157 maxchainlen = None
1136 if sparserevlog:
1158 if sparserevlog:
1137 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1159 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1138 # experimental config: format.maxchainlen
1160 # experimental config: format.maxchainlen
1139 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1161 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1140 if maxchainlen is not None:
1162 if maxchainlen is not None:
1141 delta_config.max_chain_len = maxchainlen
1163 delta_config.max_chain_len = maxchainlen
1142
1164
1143 for r in requirements:
1165 for r in requirements:
1144 # we allow multiple compression engine requirements to co-exist because,
1166 # we allow multiple compression engine requirements to co-exist because,
1145 # strictly speaking, revlogs seem to support mixed compression styles.
1167 # strictly speaking, revlogs seem to support mixed compression styles.
1146 #
1168 #
1147 # The compression used for new entries will be "the last one"
1169 # The compression used for new entries will be "the last one"
1148 prefix = r.startswith
1170 prefix = r.startswith
1149 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1171 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1150 feature_config.compression_engine = r.split(b'-', 2)[2]
1172 feature_config.compression_engine = r.split(b'-', 2)[2]
1151
1173
1152 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1174 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1153 if zlib_level is not None:
1175 if zlib_level is not None:
1154 if not (0 <= zlib_level <= 9):
1176 if not (0 <= zlib_level <= 9):
1155 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1177 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1156 raise error.Abort(msg % zlib_level)
1178 raise error.Abort(msg % zlib_level)
1157 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1179 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1158 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1180 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1159 if zstd_level is not None:
1181 if zstd_level is not None:
1160 if not (0 <= zstd_level <= 22):
1182 if not (0 <= zstd_level <= 22):
1161 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1183 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1162 raise error.Abort(msg % zstd_level)
1184 raise error.Abort(msg % zstd_level)
1163 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1185 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1164
1186
1165 if requirementsmod.NARROW_REQUIREMENT in requirements:
1187 if requirementsmod.NARROW_REQUIREMENT in requirements:
1166 feature_config.enable_ellipsis = True
1188 feature_config.enable_ellipsis = True
1167
1189
1168 if ui.configbool(b'experimental', b'rust.index'):
1190 if ui.configbool(b'experimental', b'rust.index'):
1169 options[b'rust.index'] = True
1191 options[b'rust.index'] = True
1170 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1192 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1171 slow_path = ui.config(
1193 slow_path = ui.config(
1172 b'storage', b'revlog.persistent-nodemap.slow-path'
1194 b'storage', b'revlog.persistent-nodemap.slow-path'
1173 )
1195 )
1174 if slow_path not in (b'allow', b'warn', b'abort'):
1196 if slow_path not in (b'allow', b'warn', b'abort'):
1175 default = ui.config_default(
1197 default = ui.config_default(
1176 b'storage', b'revlog.persistent-nodemap.slow-path'
1198 b'storage', b'revlog.persistent-nodemap.slow-path'
1177 )
1199 )
1178 msg = _(
1200 msg = _(
1179 b'unknown value for config '
1201 b'unknown value for config '
1180 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1202 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1181 )
1203 )
1182 ui.warn(msg % slow_path)
1204 ui.warn(msg % slow_path)
1183 if not ui.quiet:
1205 if not ui.quiet:
1184 ui.warn(_(b'falling back to default value: %s\n') % default)
1206 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 slow_path = default
1207 slow_path = default
1186
1208
1187 msg = _(
1209 msg = _(
1188 b"accessing `persistent-nodemap` repository without associated "
1210 b"accessing `persistent-nodemap` repository without associated "
1189 b"fast implementation."
1211 b"fast implementation."
1190 )
1212 )
1191 hint = _(
1213 hint = _(
1192 b"check `hg help config.format.use-persistent-nodemap` "
1214 b"check `hg help config.format.use-persistent-nodemap` "
1193 b"for details"
1215 b"for details"
1194 )
1216 )
1195 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1217 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1196 if slow_path == b'warn':
1218 if slow_path == b'warn':
1197 msg = b"warning: " + msg + b'\n'
1219 msg = b"warning: " + msg + b'\n'
1198 ui.warn(msg)
1220 ui.warn(msg)
1199 if not ui.quiet:
1221 if not ui.quiet:
1200 hint = b'(' + hint + b')\n'
1222 hint = b'(' + hint + b')\n'
1201 ui.warn(hint)
1223 ui.warn(hint)
1202 if slow_path == b'abort':
1224 if slow_path == b'abort':
1203 raise error.Abort(msg, hint=hint)
1225 raise error.Abort(msg, hint=hint)
1204 options[b'persistent-nodemap'] = True
1226 options[b'persistent-nodemap'] = True
1205 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1227 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1206 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1228 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1207 if slow_path not in (b'allow', b'warn', b'abort'):
1229 if slow_path not in (b'allow', b'warn', b'abort'):
1208 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1230 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1209 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1231 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1210 ui.warn(msg % slow_path)
1232 ui.warn(msg % slow_path)
1211 if not ui.quiet:
1233 if not ui.quiet:
1212 ui.warn(_(b'falling back to default value: %s\n') % default)
1234 ui.warn(_(b'falling back to default value: %s\n') % default)
1213 slow_path = default
1235 slow_path = default
1214
1236
1215 msg = _(
1237 msg = _(
1216 b"accessing `dirstate-v2` repository without associated "
1238 b"accessing `dirstate-v2` repository without associated "
1217 b"fast implementation."
1239 b"fast implementation."
1218 )
1240 )
1219 hint = _(
1241 hint = _(
1220 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1242 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1221 )
1243 )
1222 if not dirstate.HAS_FAST_DIRSTATE_V2:
1244 if not dirstate.HAS_FAST_DIRSTATE_V2:
1223 if slow_path == b'warn':
1245 if slow_path == b'warn':
1224 msg = b"warning: " + msg + b'\n'
1246 msg = b"warning: " + msg + b'\n'
1225 ui.warn(msg)
1247 ui.warn(msg)
1226 if not ui.quiet:
1248 if not ui.quiet:
1227 hint = b'(' + hint + b')\n'
1249 hint = b'(' + hint + b')\n'
1228 ui.warn(hint)
1250 ui.warn(hint)
1229 if slow_path == b'abort':
1251 if slow_path == b'abort':
1230 raise error.Abort(msg, hint=hint)
1252 raise error.Abort(msg, hint=hint)
1231 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1253 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1232 options[b'persistent-nodemap.mmap'] = True
1254 options[b'persistent-nodemap.mmap'] = True
1233 if ui.configbool(b'devel', b'persistent-nodemap'):
1255 if ui.configbool(b'devel', b'persistent-nodemap'):
1234 options[b'devel-force-nodemap'] = True
1256 options[b'devel-force-nodemap'] = True
1235
1257
1236 return options
1258 return options
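
One subtlety in the requirements loop above deserves a note: the engine name is recovered with ``split(b'-', 2)``, so compression engine names that themselves contain dashes survive intact. A quick demonstration:

for r in (b'revlog-compression-zstd', b'exp-compression-foo-bar'):
    prefix = r.startswith
    if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
        print(r.split(b'-', 2)[2])  # prints b'zstd', then b'foo-bar'
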
1237
1259
1238
1260
1239 def makemain(**kwargs):
1261 def makemain(**kwargs):
1240 """Produce a type conforming to ``ilocalrepositorymain``."""
1262 """Produce a type conforming to ``ilocalrepositorymain``."""
1241 return localrepository
1263 return localrepository
1242
1264
1243
1265
1244 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1266 class RevlogFileStorage:
1245 class revlogfilestorage:
1246 """File storage when using revlogs."""
1267 """File storage when using revlogs."""
1247
1268
1248 def file(self, path):
1269 def file(self, path):
1249 if path.startswith(b'/'):
1270 if path.startswith(b'/'):
1250 path = path[1:]
1271 path = path[1:]
1251
1272
1252 try_split = (
1273 try_split = (
1253 self.currenttransaction() is not None
1274 self.currenttransaction() is not None
1254 or txnutil.mayhavepending(self.root)
1275 or txnutil.mayhavepending(self.root)
1255 )
1276 )
1256
1277
1257 return filelog.filelog(self.svfs, path, try_split=try_split)
1278 return filelog.filelog(self.svfs, path, try_split=try_split)
1258
1279
1259
1280
1260 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1281 revlogfilestorage = interfaceutil.implementer(
1261 class revlognarrowfilestorage:
1282 repository.ilocalrepositoryfilestorage
1283 )(RevlogFileStorage)
1284
1285 if typing.TYPE_CHECKING:
1286 # Help pytype by hiding the interface stuff that confuses it.
1287 revlogfilestorage = RevlogFileStorage
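
This is the pattern the change applies throughout the file: apply the zope-style decoration imperatively so the runtime name keeps the decorated class, then re-point the same name at the plain class under ``typing.TYPE_CHECKING`` so pytype analyzes an ordinary class. A generic, self-contained sketch; ``implementer`` below is a stand-in, not the real ``interfaceutil`` helper:

import typing

def implementer(iface):
    def deco(cls):
        cls.__implemented__ = iface  # stand-in for the zope bookkeeping
        return cls
    return deco

class _RealClass:
    pass

public_name = implementer('ISomething')(_RealClass)  # what runtime code uses

if typing.TYPE_CHECKING:
    # type checkers see the undecorated class and keep full type information
    public_name = _RealClass
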
1288
1289
1290 class RevlogNarrowFileStorage:
1262 """File storage when using revlogs and narrow files."""
1291 """File storage when using revlogs and narrow files."""
1263
1292
1264 def file(self, path):
1293 def file(self, path):
1265 if path.startswith(b'/'):
1294 if path.startswith(b'/'):
1266 path = path[1:]
1295 path = path[1:]
1267
1296
1268 try_split = (
1297 try_split = (
1269 self.currenttransaction() is not None
1298 self.currenttransaction() is not None
1270 or txnutil.mayhavepending(self.root)
1299 or txnutil.mayhavepending(self.root)
1271 )
1300 )
1272 return filelog.narrowfilelog(
1301 return filelog.narrowfilelog(
1273 self.svfs, path, self._storenarrowmatch, try_split=try_split
1302 self.svfs, path, self._storenarrowmatch, try_split=try_split
1274 )
1303 )
1275
1304
1276
1305
1306 revlognarrowfilestorage = interfaceutil.implementer(
1307 repository.ilocalrepositoryfilestorage
1308 )(RevlogNarrowFileStorage)
1309
1310 if typing.TYPE_CHECKING:
1311 # Help pytype by hiding the interface stuff that confuses it.
1312 revlognarrowfilestorage = RevlogNarrowFileStorage
1313
1314
1277 def makefilestorage(requirements, features, **kwargs):
1315 def makefilestorage(requirements, features, **kwargs):
1278 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1316 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1279 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1317 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1280 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1318 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1281
1319
1282 if requirementsmod.NARROW_REQUIREMENT in requirements:
1320 if requirementsmod.NARROW_REQUIREMENT in requirements:
1283 return revlognarrowfilestorage
1321 return revlognarrowfilestorage
1284 else:
1322 else:
1285 return revlogfilestorage
1323 return revlogfilestorage
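
A usage sketch: the narrow requirement is the only thing that changes the returned mixin, and ``features`` is mutated as a side effect. The requirement string should match ``NARROW_REQUIREMENT``, but treat it as illustrative:

features = set()
cls = makefilestorage({b'narrowhg-experimental'}, features)
assert cls is revlognarrowfilestorage
assert repository.REPO_FEATURE_REVLOG_FILE_STORAGE in features
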
1286
1324
1287
1325
1288 # List of repository interfaces and factory functions for them. Each
1326 # List of repository interfaces and factory functions for them. Each
1289 # will be called in order during ``makelocalrepository()`` to iteratively
1327 # will be called in order during ``makelocalrepository()`` to iteratively
1290 # derive the final type for a local repository instance. We capture the
1328 # derive the final type for a local repository instance. We capture the
1291 # function as a lambda so we don't hold a reference and the module-level
1329 # function as a lambda so we don't hold a reference and the module-level
1292 # functions can be wrapped.
1330 # functions can be wrapped.
1293 REPO_INTERFACES = [
1331 REPO_INTERFACES = [
1294 (repository.ilocalrepositorymain, lambda: makemain),
1332 (repository.ilocalrepositorymain, lambda: makemain),
1295 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1333 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1296 ]
1334 ]
1297
1335
1298
1336 _localrepo_base_classes = object
1299 @interfaceutil.implementer(repository.ilocalrepositorymain)
1337
1300 class localrepository:
1338 if typing.TYPE_CHECKING:
1339 _localrepo_base_classes = [
1340 repository.ilocalrepositorymain,
1341 repository.ilocalrepositoryfilestorage,
1342 ]
1343
1344
1345 class LocalRepository(_localrepo_base_classes):
1301 """Main class for representing local repositories.
1346 """Main class for representing local repositories.
1302
1347
1303 All local repositories are instances of this class.
1348 All local repositories are instances of this class.
1304
1349
1305 Constructed on its own, instances of this class are not usable as
1350 Constructed on its own, instances of this class are not usable as
1306 repository objects. To obtain a usable repository object, call
1351 repository objects. To obtain a usable repository object, call
1307 ``hg.repository()``, ``localrepo.instance()``, or
1352 ``hg.repository()``, ``localrepo.instance()``, or
1308 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1353 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1309 ``instance()`` adds support for creating new repositories.
1354 ``instance()`` adds support for creating new repositories.
1310 ``hg.repository()`` adds more extension integration, including calling
1355 ``hg.repository()`` adds more extension integration, including calling
1311 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1356 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1312 used.
1357 used.
1313 """
1358 """
1314
1359
1315 _basesupported = {
1360 _basesupported = {
1316 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1361 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1317 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1362 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1318 requirementsmod.CHANGELOGV2_REQUIREMENT,
1363 requirementsmod.CHANGELOGV2_REQUIREMENT,
1319 requirementsmod.COPIESSDC_REQUIREMENT,
1364 requirementsmod.COPIESSDC_REQUIREMENT,
1320 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1365 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1321 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1366 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1322 requirementsmod.DOTENCODE_REQUIREMENT,
1367 requirementsmod.DOTENCODE_REQUIREMENT,
1323 requirementsmod.FNCACHE_REQUIREMENT,
1368 requirementsmod.FNCACHE_REQUIREMENT,
1324 requirementsmod.GENERALDELTA_REQUIREMENT,
1369 requirementsmod.GENERALDELTA_REQUIREMENT,
1325 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1370 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1326 requirementsmod.NODEMAP_REQUIREMENT,
1371 requirementsmod.NODEMAP_REQUIREMENT,
1327 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1372 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1328 requirementsmod.REVLOGV1_REQUIREMENT,
1373 requirementsmod.REVLOGV1_REQUIREMENT,
1329 requirementsmod.REVLOGV2_REQUIREMENT,
1374 requirementsmod.REVLOGV2_REQUIREMENT,
1330 requirementsmod.SHARED_REQUIREMENT,
1375 requirementsmod.SHARED_REQUIREMENT,
1331 requirementsmod.SHARESAFE_REQUIREMENT,
1376 requirementsmod.SHARESAFE_REQUIREMENT,
1332 requirementsmod.SPARSE_REQUIREMENT,
1377 requirementsmod.SPARSE_REQUIREMENT,
1333 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1378 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1334 requirementsmod.STORE_REQUIREMENT,
1379 requirementsmod.STORE_REQUIREMENT,
1335 requirementsmod.TREEMANIFEST_REQUIREMENT,
1380 requirementsmod.TREEMANIFEST_REQUIREMENT,
1336 }
1381 }
1337
1382
1338 # list of prefixes for files which can be written without 'wlock'
1383 # list of prefixes for files which can be written without 'wlock'
1339 # Extensions should extend this list when needed
1384 # Extensions should extend this list when needed
1340 _wlockfreeprefix = {
1385 _wlockfreeprefix = {
1341 # We might consider requiring 'wlock' for the next
1386 # We might consider requiring 'wlock' for the next
1342 # two, but pretty much all the existing code assumes
1387 # two, but pretty much all the existing code assumes
1343 # wlock is not needed so we keep them excluded for
1388 # wlock is not needed so we keep them excluded for
1344 # now.
1389 # now.
1345 b'hgrc',
1390 b'hgrc',
1346 b'requires',
1391 b'requires',
1347 # XXX cache is a complicated business; someone
1392 # XXX cache is a complicated business; someone
1348 # should investigate this in depth at some point
1393 # should investigate this in depth at some point
1349 b'cache/',
1394 b'cache/',
1350 # XXX bisect was still a bit too messy at the time
1395 # XXX bisect was still a bit too messy at the time
1351 # this changeset was introduced. Someone should fix
1396 # this changeset was introduced. Someone should fix
1352 # the remaining bit and drop this line
1397 # the remaining bit and drop this line
1353 b'bisect.state',
1398 b'bisect.state',
1354 }
1399 }
1355
1400
1356 def __init__(
1401 def __init__(
1357 self,
1402 self,
1358 baseui,
1403 baseui,
1359 ui,
1404 ui,
1360 origroot: bytes,
1405 origroot: bytes,
1361 wdirvfs: vfsmod.vfs,
1406 wdirvfs: vfsmod.vfs,
1362 hgvfs: vfsmod.vfs,
1407 hgvfs: vfsmod.vfs,
1363 requirements,
1408 requirements,
1364 supportedrequirements,
1409 supportedrequirements,
1365 sharedpath: bytes,
1410 sharedpath: bytes,
1366 store,
1411 store,
1367 cachevfs: vfsmod.vfs,
1412 cachevfs: vfsmod.vfs,
1368 wcachevfs: vfsmod.vfs,
1413 wcachevfs: vfsmod.vfs,
1369 features,
1414 features,
1370 intents=None,
1415 intents=None,
1371 ):
1416 ):
1372 """Create a new local repository instance.
1417 """Create a new local repository instance.
1373
1418
1374 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1419 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1375 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1420 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1376 object.
1421 object.
1377
1422
1378 Arguments:
1423 Arguments:
1379
1424
1380 baseui
1425 baseui
1381 ``ui.ui`` instance that ``ui`` argument was based off of.
1426 ``ui.ui`` instance that ``ui`` argument was based off of.
1382
1427
1383 ui
1428 ui
1384 ``ui.ui`` instance for use by the repository.
1429 ``ui.ui`` instance for use by the repository.
1385
1430
1386 origroot
1431 origroot
1387 ``bytes`` path to working directory root of this repository.
1432 ``bytes`` path to working directory root of this repository.
1388
1433
1389 wdirvfs
1434 wdirvfs
1390 ``vfs.vfs`` rooted at the working directory.
1435 ``vfs.vfs`` rooted at the working directory.
1391
1436
1392 hgvfs
1437 hgvfs
1393 ``vfs.vfs`` rooted at .hg/
1438 ``vfs.vfs`` rooted at .hg/
1394
1439
1395 requirements
1440 requirements
1396 ``set`` of bytestrings representing repository opening requirements.
1441 ``set`` of bytestrings representing repository opening requirements.
1397
1442
1398 supportedrequirements
1443 supportedrequirements
1399 ``set`` of bytestrings representing repository requirements that we
1444 ``set`` of bytestrings representing repository requirements that we
1400 know how to open. May be a superset of ``requirements``.
1445 know how to open. May be a superset of ``requirements``.
1401
1446
1402 sharedpath
1447 sharedpath
1403 ``bytes`` defining the path to the storage base directory. Points to a
1448 ``bytes`` defining the path to the storage base directory. Points to a
1404 ``.hg/`` directory somewhere.
1449 ``.hg/`` directory somewhere.
1405
1450
1406 store
1451 store
1407 ``store.basicstore`` (or derived) instance providing access to
1452 ``store.basicstore`` (or derived) instance providing access to
1408 versioned storage.
1453 versioned storage.
1409
1454
1410 cachevfs
1455 cachevfs
1411 ``vfs.vfs`` used for cache files.
1456 ``vfs.vfs`` used for cache files.
1412
1457
1413 wcachevfs
1458 wcachevfs
1414 ``vfs.vfs`` used for cache files related to the working copy.
1459 ``vfs.vfs`` used for cache files related to the working copy.
1415
1460
1416 features
1461 features
1417 ``set`` of bytestrings defining features/capabilities of this
1462 ``set`` of bytestrings defining features/capabilities of this
1418 instance.
1463 instance.
1419
1464
1420 intents
1465 intents
1421 ``set`` of system strings indicating what this repo will be used
1466 ``set`` of system strings indicating what this repo will be used
1422 for.
1467 for.
1423 """
1468 """
1424 self.baseui = baseui
1469 self.baseui = baseui
1425 self.ui = ui
1470 self.ui = ui
1426 self.origroot = origroot
1471 self.origroot = origroot
1427 # vfs rooted at working directory.
1472 # vfs rooted at working directory.
1428 self.wvfs = wdirvfs
1473 self.wvfs = wdirvfs
1429 self.root = wdirvfs.base
1474 self.root = wdirvfs.base
1430 # vfs rooted at .hg/. Used to access most non-store paths.
1475 # vfs rooted at .hg/. Used to access most non-store paths.
1431 self.vfs = hgvfs
1476 self.vfs = hgvfs
1432 self.path = hgvfs.base
1477 self.path = hgvfs.base
1433 self.requirements = requirements
1478 self.requirements = requirements
1434 self.nodeconstants = sha1nodeconstants
1479 self.nodeconstants = sha1nodeconstants
1435 self.nullid = self.nodeconstants.nullid
1480 self.nullid = self.nodeconstants.nullid
1436 self.supported = supportedrequirements
1481 self.supported = supportedrequirements
1437 self.sharedpath = sharedpath
1482 self.sharedpath = sharedpath
1438 self.store = store
1483 self.store = store
1439 self.cachevfs = cachevfs
1484 self.cachevfs = cachevfs
1440 self.wcachevfs = wcachevfs
1485 self.wcachevfs = wcachevfs
1441 self.features = features
1486 self.features = features
1442
1487
1443 self.filtername = None
1488 self.filtername = None
1444
1489
1445 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1490 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1446 b'devel', b'check-locks'
1491 b'devel', b'check-locks'
1447 ):
1492 ):
1448 self.vfs.audit = self._getvfsward(self.vfs.audit)
1493 self.vfs.audit = self._getvfsward(self.vfs.audit)
1449 # A list of callbacks to shape the phases if no data were found.
1494 # A list of callbacks to shape the phases if no data were found.
1450 # Callbacks are in the form: func(repo, roots) --> processed roots.
1495 # Callbacks are in the form: func(repo, roots) --> processed roots.
1451 # This list is to be filled by extensions during repo setup.
1496 # This list is to be filled by extensions during repo setup.
1452 self._phasedefaults = []
1497 self._phasedefaults = []
1453
1498
1454 color.setup(self.ui)
1499 color.setup(self.ui)
1455
1500
1456 self.spath = self.store.path
1501 self.spath = self.store.path
1457 self.svfs = self.store.vfs
1502 self.svfs = self.store.vfs
1458 self.sjoin = self.store.join
1503 self.sjoin = self.store.join
1459 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1504 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1460 b'devel', b'check-locks'
1505 b'devel', b'check-locks'
1461 ):
1506 ):
1462 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1507 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1463 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1508 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1464 else: # standard vfs
1509 else: # standard vfs
1465 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1510 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1466
1511
1467 self._dirstatevalidatewarned = False
1512 self._dirstatevalidatewarned = False
1468
1513
1469 self._branchcaches = branchmap.BranchMapCache()
1514 self._branchcaches = branchmap.BranchMapCache()
1470 self._revbranchcache = None
1515 self._revbranchcache = None
1471 self._filterpats = {}
1516 self._filterpats = {}
1472 self._datafilters = {}
1517 self._datafilters = {}
1473 self._transref = self._lockref = self._wlockref = None
1518 self._transref = self._lockref = self._wlockref = None
1474
1519
1475 # A cache for various files under .hg/ that tracks file changes,
1520 # A cache for various files under .hg/ that tracks file changes,
1476 # (used by the filecache decorator)
1521 # (used by the filecache decorator)
1477 #
1522 #
1478 # Maps a property name to its util.filecacheentry
1523 # Maps a property name to its util.filecacheentry
1479 self._filecache = {}
1524 self._filecache = {}
1480
1525
1481 # hold sets of revisions to be filtered
1526 # hold sets of revisions to be filtered
1482 # should be cleared when something might have changed the filter value:
1527 # should be cleared when something might have changed the filter value:
1483 # - new changesets,
1528 # - new changesets,
1484 # - phase change,
1529 # - phase change,
1485 # - new obsolescence marker,
1530 # - new obsolescence marker,
1486 # - working directory parent change,
1531 # - working directory parent change,
1487 # - bookmark changes
1532 # - bookmark changes
1488 self.filteredrevcache = {}
1533 self.filteredrevcache = {}
1489
1534
1490 self._dirstate = None
1535 self._dirstate = None
1491 # post-dirstate-status hooks
1536 # post-dirstate-status hooks
1492 self._postdsstatus = []
1537 self._postdsstatus = []
1493
1538
1494 self._pending_narrow_pats = None
1539 self._pending_narrow_pats = None
1495 self._pending_narrow_pats_dirstate = None
1540 self._pending_narrow_pats_dirstate = None
1496
1541
1497 # generic mapping between names and nodes
1542 # generic mapping between names and nodes
1498 self.names = namespaces.namespaces()
1543 self.names = namespaces.namespaces()
1499
1544
1500 # Key to signature value.
1545 # Key to signature value.
1501 self._sparsesignaturecache = {}
1546 self._sparsesignaturecache = {}
1502 # Signature to cached matcher instance.
1547 # Signature to cached matcher instance.
1503 self._sparsematchercache = {}
1548 self._sparsematchercache = {}
1504
1549
1505 self._extrafilterid = repoview.extrafilter(ui)
1550 self._extrafilterid = repoview.extrafilter(ui)
1506
1551
1507 self.filecopiesmode = None
1552 self.filecopiesmode = None
1508 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1553 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1509 self.filecopiesmode = b'changeset-sidedata'
1554 self.filecopiesmode = b'changeset-sidedata'
1510
1555
1511 self._wanted_sidedata = set()
1556 self._wanted_sidedata = set()
1512 self._sidedata_computers = {}
1557 self._sidedata_computers = {}
1513 sidedatamod.set_sidedata_spec_for_repo(self)
1558 sidedatamod.set_sidedata_spec_for_repo(self)
1514
1559
1515 def _getvfsward(self, origfunc):
1560 def _getvfsward(self, origfunc):
1516 """build a ward for self.vfs"""
1561 """build a ward for self.vfs"""
1517 rref = weakref.ref(self)
1562 rref = weakref.ref(self)
1518
1563
1519 def checkvfs(path, mode=None):
1564 def checkvfs(path, mode=None):
1520 ret = origfunc(path, mode=mode)
1565 ret = origfunc(path, mode=mode)
1521 repo = rref()
1566 repo = rref()
1522 if (
1567 if (
1523 repo is None
1568 repo is None
1524 or not hasattr(repo, '_wlockref')
1569 or not hasattr(repo, '_wlockref')
1525 or not hasattr(repo, '_lockref')
1570 or not hasattr(repo, '_lockref')
1526 ):
1571 ):
1527 return
1572 return
1528 if mode in (None, b'r', b'rb'):
1573 if mode in (None, b'r', b'rb'):
1529 return
1574 return
1530 if path.startswith(repo.path):
1575 if path.startswith(repo.path):
1531 # truncate name relative to the repository (.hg)
1576 # truncate name relative to the repository (.hg)
1532 path = path[len(repo.path) + 1 :]
1577 path = path[len(repo.path) + 1 :]
1533 if path.startswith(b'cache/'):
1578 if path.startswith(b'cache/'):
1534 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1579 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1535 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1580 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1536 # path prefixes covered by 'lock'
1581 # path prefixes covered by 'lock'
1537 vfs_path_prefixes = (
1582 vfs_path_prefixes = (
1538 b'journal.',
1583 b'journal.',
1539 b'undo.',
1584 b'undo.',
1540 b'strip-backup/',
1585 b'strip-backup/',
1541 b'cache/',
1586 b'cache/',
1542 )
1587 )
1543 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1588 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1544 if repo._currentlock(repo._lockref) is None:
1589 if repo._currentlock(repo._lockref) is None:
1545 repo.ui.develwarn(
1590 repo.ui.develwarn(
1546 b'write with no lock: "%s"' % path,
1591 b'write with no lock: "%s"' % path,
1547 stacklevel=3,
1592 stacklevel=3,
1548 config=b'check-locks',
1593 config=b'check-locks',
1549 )
1594 )
1550 elif repo._currentlock(repo._wlockref) is None:
1595 elif repo._currentlock(repo._wlockref) is None:
1551 # rest of vfs files are covered by 'wlock'
1596 # rest of vfs files are covered by 'wlock'
1552 #
1597 #
1553 # exclude special files
1598 # exclude special files
1554 for prefix in self._wlockfreeprefix:
1599 for prefix in self._wlockfreeprefix:
1555 if path.startswith(prefix):
1600 if path.startswith(prefix):
1556 return
1601 return
1557 repo.ui.develwarn(
1602 repo.ui.develwarn(
1558 b'write with no wlock: "%s"' % path,
1603 b'write with no wlock: "%s"' % path,
1559 stacklevel=3,
1604 stacklevel=3,
1560 config=b'check-locks',
1605 config=b'check-locks',
1561 )
1606 )
1562 return ret
1607 return ret
1563
1608
1564 return checkvfs
1609 return checkvfs
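
The ward wrappers above follow a general recipe: hold the owning object through a ``weakref`` so that the wrapper, which the owner stores on itself, does not create a reference cycle. A generic sketch of just that recipe (not Mercurial API):

import weakref

def make_ward(owner, origfunc):
    rref = weakref.ref(owner)  # avoid a strong owner <-> wrapper cycle

    def checked(path, mode=None):
        ret = origfunc(path, mode=mode)
        obj = rref()
        if obj is None:
            return ret  # owner already collected; nothing to check against
        # ... develwarn-style checks against obj's lock state go here ...
        return ret

    return checked
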
1565
1610
1566 def _getsvfsward(self, origfunc):
1611 def _getsvfsward(self, origfunc):
1567 """build a ward for self.svfs"""
1612 """build a ward for self.svfs"""
1568 rref = weakref.ref(self)
1613 rref = weakref.ref(self)
1569
1614
1570 def checksvfs(path, mode=None):
1615 def checksvfs(path, mode=None):
1571 ret = origfunc(path, mode=mode)
1616 ret = origfunc(path, mode=mode)
1572 repo = rref()
1617 repo = rref()
1573 if repo is None or not hasattr(repo, '_lockref'):
1618 if repo is None or not hasattr(repo, '_lockref'):
1574 return
1619 return
1575 if mode in (None, b'r', b'rb'):
1620 if mode in (None, b'r', b'rb'):
1576 return
1621 return
1577 if path.startswith(repo.sharedpath):
1622 if path.startswith(repo.sharedpath):
1578 # truncate name relative to the repository (.hg)
1623 # truncate name relative to the repository (.hg)
1579 path = path[len(repo.sharedpath) + 1 :]
1624 path = path[len(repo.sharedpath) + 1 :]
1580 if repo._currentlock(repo._lockref) is None:
1625 if repo._currentlock(repo._lockref) is None:
1581 repo.ui.develwarn(
1626 repo.ui.develwarn(
1582 b'write with no lock: "%s"' % path, stacklevel=4
1627 b'write with no lock: "%s"' % path, stacklevel=4
1583 )
1628 )
1584 return ret
1629 return ret
1585
1630
1586 return checksvfs
1631 return checksvfs
1587
1632
1588 @property
1633 @property
1589 def vfs_map(self):
1634 def vfs_map(self):
1590 return {
1635 return {
1591 b'': self.svfs,
1636 b'': self.svfs,
1592 b'plain': self.vfs,
1637 b'plain': self.vfs,
1593 b'store': self.svfs,
1638 b'store': self.svfs,
1594 }
1639 }
1595
1640
1596 def close(self):
1641 def close(self):
1597 self._writecaches()
1642 self._writecaches()
1598
1643
1599 def _writecaches(self):
1644 def _writecaches(self):
1600 if self._revbranchcache:
1645 if self._revbranchcache:
1601 self._revbranchcache.write()
1646 self._revbranchcache.write()
1602
1647
1603 def _restrictcapabilities(self, caps):
1648 def _restrictcapabilities(self, caps):
1604 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1649 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1605 caps = set(caps)
1650 caps = set(caps)
1606 capsblob = bundle2.encodecaps(
1651 capsblob = bundle2.encodecaps(
1607 bundle2.getrepocaps(self, role=b'client')
1652 bundle2.getrepocaps(self, role=b'client')
1608 )
1653 )
1609 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1654 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1610 if self.ui.configbool(b'experimental', b'narrow'):
1655 if self.ui.configbool(b'experimental', b'narrow'):
1611 caps.add(wireprototypes.NARROWCAP)
1656 caps.add(wireprototypes.NARROWCAP)
1612 return caps
1657 return caps
1613
1658
1614 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1659 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1615 # self -> auditor -> self._checknested -> self
1660 # self -> auditor -> self._checknested -> self
1616
1661
1617 @property
1662 @property
1618 def auditor(self):
1663 def auditor(self):
1619 # This is only used by context.workingctx.match in order to
1664 # This is only used by context.workingctx.match in order to
1620 # detect files in subrepos.
1665 # detect files in subrepos.
1621 return pathutil.pathauditor(self.root, callback=self._checknested)
1666 return pathutil.pathauditor(self.root, callback=self._checknested)
1622
1667
1623 @property
1668 @property
1624 def nofsauditor(self):
1669 def nofsauditor(self):
1625 # This is only used by context.basectx.match in order to detect
1670 # This is only used by context.basectx.match in order to detect
1626 # files in subrepos.
1671 # files in subrepos.
1627 return pathutil.pathauditor(
1672 return pathutil.pathauditor(
1628 self.root, callback=self._checknested, realfs=False, cached=True
1673 self.root, callback=self._checknested, realfs=False, cached=True
1629 )
1674 )
1630
1675
1631 def _checknested(self, path):
1676 def _checknested(self, path):
1632 """Determine if path is a legal nested repository."""
1677 """Determine if path is a legal nested repository."""
1633 if not path.startswith(self.root):
1678 if not path.startswith(self.root):
1634 return False
1679 return False
1635 subpath = path[len(self.root) + 1 :]
1680 subpath = path[len(self.root) + 1 :]
1636 normsubpath = util.pconvert(subpath)
1681 normsubpath = util.pconvert(subpath)
1637
1682
1638 # XXX: Checking against the current working copy is wrong in
1683 # XXX: Checking against the current working copy is wrong in
1639 # the sense that it can reject things like
1684 # the sense that it can reject things like
1640 #
1685 #
1641 # $ hg cat -r 10 sub/x.txt
1686 # $ hg cat -r 10 sub/x.txt
1642 #
1687 #
1643 # if sub/ is no longer a subrepository in the working copy
1688 # if sub/ is no longer a subrepository in the working copy
1644 # parent revision.
1689 # parent revision.
1645 #
1690 #
1646 # However, it can of course also allow things that would have
1691 # However, it can of course also allow things that would have
1647 # been rejected before, such as the above cat command if sub/
1692 # been rejected before, such as the above cat command if sub/
1648 # is a subrepository now, but was a normal directory before.
1693 # is a subrepository now, but was a normal directory before.
1649 # The old path auditor would have rejected by mistake since it
1694 # The old path auditor would have rejected by mistake since it
1650 # panics when it sees sub/.hg/.
1695 # panics when it sees sub/.hg/.
1651 #
1696 #
1652 # All in all, checking against the working copy seems sensible
1697 # All in all, checking against the working copy seems sensible
1653 # since we want to prevent access to nested repositories on
1698 # since we want to prevent access to nested repositories on
1654 # the filesystem *now*.
1699 # the filesystem *now*.
1655 ctx = self[None]
1700 ctx = self[None]
1656 parts = util.splitpath(subpath)
1701 parts = util.splitpath(subpath)
1657 while parts:
1702 while parts:
1658 prefix = b'/'.join(parts)
1703 prefix = b'/'.join(parts)
1659 if prefix in ctx.substate:
1704 if prefix in ctx.substate:
1660 if prefix == normsubpath:
1705 if prefix == normsubpath:
1661 return True
1706 return True
1662 else:
1707 else:
1663 sub = ctx.sub(prefix)
1708 sub = ctx.sub(prefix)
1664 return sub.checknested(subpath[len(prefix) + 1 :])
1709 return sub.checknested(subpath[len(prefix) + 1 :])
1665 else:
1710 else:
1666 parts.pop()
1711 parts.pop()
1667 return False
1712 return False
1668
1713
1669 def peer(self, path=None, remotehidden=False):
1714 def peer(self, path=None, remotehidden=False):
1670 return localpeer(
1715 return localpeer(
1671 self, path=path, remotehidden=remotehidden
1716 self, path=path, remotehidden=remotehidden
1672 ) # not cached to avoid reference cycle
1717 ) # not cached to avoid reference cycle
1673
1718
1674 def unfiltered(self):
1719 def unfiltered(self):
1675 """Return unfiltered version of the repository
1720 """Return unfiltered version of the repository
1676
1721
1677 Intended to be overwritten by filtered repo."""
1722 Intended to be overwritten by filtered repo."""
1678 return self
1723 return self
1679
1724
1680 def filtered(self, name, visibilityexceptions=None):
1725 def filtered(self, name, visibilityexceptions=None):
1681 """Return a filtered version of a repository
1726 """Return a filtered version of a repository
1682
1727
1683 The `name` parameter is the identifier of the requested view. This
1728 The `name` parameter is the identifier of the requested view. This
1684 will return a repoview object set "exactly" to the specified view.
1729 will return a repoview object set "exactly" to the specified view.
1685
1730
1686 This function does not apply recursive filtering to a repository. For
1731 This function does not apply recursive filtering to a repository. For
1687 example calling `repo.filtered("served")` will return a repoview using
1732 example calling `repo.filtered("served")` will return a repoview using
1688 the "served" view, regardless of the initial view used by `repo`.
1733 the "served" view, regardless of the initial view used by `repo`.
1689
1734
1690 In other words, there is always only one level of `repoview` "filtering".
1735 In other words, there is always only one level of `repoview` "filtering".
1691 """
1736 """
1692 if self._extrafilterid is not None and b'%' not in name:
1737 if self._extrafilterid is not None and b'%' not in name:
1693 name = name + b'%' + self._extrafilterid
1738 name = name + b'%' + self._extrafilterid
1694
1739
1695 cls = repoview.newtype(self.unfiltered().__class__)
1740 cls = repoview.newtype(self.unfiltered().__class__)
1696 return cls(self, name, visibilityexceptions)
1741 return cls(self, name, visibilityexceptions)
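
A usage sketch of the non-recursive behavior described above; ``repo`` is assumed to be any local repository object:

served = repo.filtered(b'served')    # the 'served' view of the repo
again = served.filtered(b'visible')  # exactly the 'visible' view, not a
                                     # composition of 'served' and 'visible'
assert repo.unfiltered() is repo.unfiltered().unfiltered()
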
1697
1742
1698 @mixedrepostorecache(
1743 @mixedrepostorecache(
1699 (b'bookmarks', b'plain'),
1744 (b'bookmarks', b'plain'),
1700 (b'bookmarks.current', b'plain'),
1745 (b'bookmarks.current', b'plain'),
1701 (b'bookmarks', b''),
1746 (b'bookmarks', b''),
1702 (b'00changelog.i', b''),
1747 (b'00changelog.i', b''),
1703 )
1748 )
1704 def _bookmarks(self):
1749 def _bookmarks(self):
1705 # Since the multiple files involved in the transaction cannot be
1750 # Since the multiple files involved in the transaction cannot be
1706 # written atomically (with current repository format), there is a race
1751 # written atomically (with current repository format), there is a race
1707 # condition here.
1752 # condition here.
1708 #
1753 #
1709 # 1) changelog content A is read
1754 # 1) changelog content A is read
1710 # 2) outside transaction update changelog to content B
1755 # 2) outside transaction update changelog to content B
1711 # 3) outside transaction update bookmark file referring to content B
1756 # 3) outside transaction update bookmark file referring to content B
1712 # 4) bookmarks file content is read and filtered against changelog-A
1757 # 4) bookmarks file content is read and filtered against changelog-A
1713 #
1758 #
1714 # When this happens, bookmarks against nodes missing from A are dropped.
1759 # When this happens, bookmarks against nodes missing from A are dropped.
1715 #
1760 #
1716 # Having this happen during a read is not great, but it becomes worse
1761 # Having this happen during a read is not great, but it becomes worse
1717 # when it happens during a write, because the bookmarks to the "unknown"
1762 # when it happens during a write, because the bookmarks to the "unknown"
1718 # nodes will be dropped for good. However, writes happen within locks.
1763 # nodes will be dropped for good. However, writes happen within locks.
1719 # This locking makes it possible to have a race-free consistent read.
1764 # This locking makes it possible to have a race-free consistent read.
1720 # For this purpose, data read from disk before locking is
1765 # For this purpose, data read from disk before locking is
1721 # "invalidated" right after the locks are taken. These invalidations are
1766 # "invalidated" right after the locks are taken. These invalidations are
1722 # "light": the `filecache` mechanism keeps the data in memory and will
1767 # "light": the `filecache` mechanism keeps the data in memory and will
1723 # reuse it if the underlying files did not change. Not parsing the
1768 # reuse it if the underlying files did not change. Not parsing the
1724 # same data multiple times helps performance.
1769 # same data multiple times helps performance.
1725 #
1770 #
1726 # Unfortunately, in the case described above, the files tracked by the
1771 # Unfortunately, in the case described above, the files tracked by the
1727 # bookmarks file cache might not have changed, but the in-memory
1772 # bookmarks file cache might not have changed, but the in-memory
1728 # content is still "wrong" because we used an older changelog content
1773 # content is still "wrong" because we used an older changelog content
1729 # to process the on-disk data. So after locking, the changelog would be
1774 # to process the on-disk data. So after locking, the changelog would be
1730 # refreshed but `_bookmarks` would be preserved.
1775 # refreshed but `_bookmarks` would be preserved.
1731 # Adding `00changelog.i` to the list of tracked files is not
1776 # Adding `00changelog.i` to the list of tracked files is not
1732 # enough, because at the time we build the content for `_bookmarks` in
1777 # enough, because at the time we build the content for `_bookmarks` in
1733 # (4), the changelog file has already diverged from the content used
1778 # (4), the changelog file has already diverged from the content used
1734 # for loading `changelog` in (1)
1779 # for loading `changelog` in (1)
1735 #
1780 #
1736 # To prevent the issue, we force the changelog to be explicitly
1781 # To prevent the issue, we force the changelog to be explicitly
1737 # reloaded while computing `_bookmarks`. The data race can still happen
1782 # reloaded while computing `_bookmarks`. The data race can still happen
1738 # without the lock (with a narrower window), but it would no longer go
1783 # without the lock (with a narrower window), but it would no longer go
1739 # undetected during the lock time refresh.
1784 # undetected during the lock time refresh.
1740 #
1785 #
1741 # The new schedule is as follow
1786 # The new schedule is as follow
1742 #
1787 #
1743 # 1) filecache logic detect that `_bookmarks` needs to be computed
1788 # 1) filecache logic detect that `_bookmarks` needs to be computed
1744 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1789 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1745 # 3) We force `changelog` filecache to be tested
1790 # 3) We force `changelog` filecache to be tested
1746 # 4) cachestat for `changelog` are captured (for changelog)
1791 # 4) cachestat for `changelog` are captured (for changelog)
1747 # 5) `_bookmarks` is computed and cached
1792 # 5) `_bookmarks` is computed and cached
1748 #
1793 #
1749 # The step in (3) ensure we have a changelog at least as recent as the
1794 # The step in (3) ensure we have a changelog at least as recent as the
1750 # cache stat computed in (1). As a result at locking time:
1795 # cache stat computed in (1). As a result at locking time:
1751 # * if the changelog did not changed since (1) -> we can reuse the data
1796 # * if the changelog did not changed since (1) -> we can reuse the data
1752 # * otherwise -> the bookmarks get refreshed.
1797 # * otherwise -> the bookmarks get refreshed.
1753 self._refreshchangelog()
1798 self._refreshchangelog()
1754 return bookmarks.bmstore(self)
1799 return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown it might just be because the changelog
            # in memory is lagging behind the dirstate in memory. So try to
            # refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null
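
    # Example (illustrative): on a repoview filter listed in
    # `repoview.filter_has_wc`, lookups such as `repo[b'.']` or
    # `repo[nullrev]` are answered from this dict without touching the
    # changelog lookup machinery.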

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

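    # Illustrative lookup forms accepted by __getitem__ (a sketch, assuming
    # a repository object named `repo`):
    #
    #   repo[None]      # working directory context
    #   repo[0]         # changectx for revision 0
    #   repo[b'.']      # first parent of the working directory
    #   repo[b'tip']    # tip changeset
    #   repo[node]      # binary node, nodelen bytes long
    #   repo[hexnode]   # full hex node, 2 * nodelen characters
    #   repo[0:2]       # list of changectx (filtered revisions excluded)
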
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

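    # Example (illustrative): the %-formatting escapes arguments safely, e.g.
    #
    #   repo.revs(b'ancestors(%d) and not public()', somerev)
    #   repo.revs(b'%ld and merge()', revlist)
    #
    # where `somerev` is an integer revision and `revlist` a list of
    # revisions; see ``revsetlang.formatspec`` for the full specifier list.
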
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

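    # Example (illustrative, with made-up alias names):
    #
    #   repo.anyrevs([b'mine()'], user=True,
    #                localalias={b'mine': b'user("alice")'})
    #
    # resolves ``mine`` from ``localalias`` even if the user configuration
    # defines an alias of the same name.
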
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

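    # Example (illustrative): for `repo.known([n1, n2])` where n1 is a known,
    # visible node and n2 is unknown or filtered, the result is
    # [True, False]; the output list is parallel to the input nodes.
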
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

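    # Illustrative hgrc configuration exercising the filter machinery above
    # (adapted from the hgrc documentation; a sketch, not a recommendation):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = gzip
    #
    # Commands run through the built-in command filter unless they start
    # with the name of a data filter registered via adddatafilter().
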
    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

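    # Typical usage (an illustrative sketch): a transaction must be opened
    # while holding the store lock, e.g.
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the repository
    #
    # where b'my-operation' is a made-up transaction name.
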
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
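        #
        # For example (an illustrative sketch, with placeholder nodes), moving
        # tag "v1.2" and adding tag "v2.0" would record three lines::
        #
        #   -M <old-hex-node> v1.2
        #   +M <new-hex-node> v1.2
        #   +A <hex-node> v2.0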
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This should be invoked explicitly here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; schedules a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
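
        # Illustrative sketch (assumed hgrc; the hook name is hypothetical):
        # the metadata set in tr.hookargs above reaches shell hooks as HG_*
        # environment variables, e.g.:
        #
        #     [hooks]
        #     txnclose.report = echo "txn $HG_TXNNAME ($HG_TXNID) closed"
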
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # Avoid eager cache invalidation. In-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need to have viable parents
        # at the end of the operation. So back up viable parents at the time
        # of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are append-only
                    # and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr
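
    # Minimal usage sketch (illustrative caller): the returned transaction is
    # used as a context manager under the store lock, as commit() does below:
    #
    #     with repo.lock():
    #         with repo.transaction(b'example') as tr:
    #             ...  # store writes registered with tr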

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
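
    # Minimal usage sketch (illustrative): recover() backs `hg recover` and
    # rolls back a transaction that was interrupted mid-write; rollback()
    # below undoes the last *completed* transaction instead:
    #
    #     if repo.recover():
    #         repo.ui.status(b'interrupted transaction rolled back\n')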

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater
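
    # Hedged sketch (extension layout and names are hypothetical) of how an
    # extension might augment this logic via extensions.wrapfunction(), as
    # the docstring above invites:
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _wrapped(orig, self, newtransaction):
    #         updater = orig(self, newtransaction)
    #
    #         def extupdater(tr):
    #             updater(tr)
    #             ...  # warm extension-specific caches here
    #
    #         return extupdater
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(
    #             localrepo.localrepository, '_buildcacheupdater', _wrapped
    #         )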

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                self.ui.debug(b'updating the branch cache\n')
                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
                served = self.filtered(b'served')
                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
                served_hidden = self.filtered(b'served.hidden')
                self._branchcaches.update_disk(
                    served_hidden, detect_pure_topo=dpt
                )

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing the fnode cache warms it
            tagsmod.warm_cache(self)

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches

            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)

        # flush all possibly delayed writes.
        self._branchcaches.write_dirty(self)
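
    # Minimal usage sketch (illustrative): callers can restrict warming to a
    # subset of caches by passing an explicit set, e.g. only the tag caches:
    #
    #     from mercurial.interfaces import repository
    #
    #     repo.updatecaches(
    #         caches={
    #             repository.CACHE_TAGS_DEFAULT,
    #             repository.CACHE_TAGS_SERVED,
    #         }
    #     )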

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restore it to a previously
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
        sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
        if not sync_file:
            sync_file = None

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
            devel_wait_sync_file=sync_file,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
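
    # Minimal usage sketch (callback name is hypothetical): schedule work for
    # after the outermost lock is released; with no lock held, the callback
    # runs immediately with True:
    #
    #     def _notify(success):
    #         repo.ui.note(b'locks released\n')
    #
    #     repo._afterlock(_notify)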

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        self.hook(b'prelock', throw=True)
        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l
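
    # Illustrative sketch of the documented acquisition order: take 'wlock'
    # before 'lock' to avoid the dead-lock hazard the docstrings warn about,
    # as commit() below does:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both .hg and .hg/store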

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        self.hook(b'prewlock', throw=True)
        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

                unfi = self.unfiltered()
                if 'dirstate' in unfi.__dict__:
                    del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)
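
    # Minimal sketch (illustrative): code that must only run while the
    # working copy is locked can guard on these accessors, as the dirstate
    # backup in transaction() above does:
    #
    #     if repo.currentwlock() is not None:
    #         ...  # safe to touch dirstate files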

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
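
    # Minimal usage sketch (assumed repo object): commit() returns the new
    # changeset node, or None when there was nothing to commit:
    #
    #     node = repo.commit(text=b'fix typo', user=b'alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')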

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )
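
    # Minimal usage sketch (illustrative): compare the working directory
    # against its first parent, including clean files:
    #
    #     st = repo.status(clean=True)
    #     for f in st.modified:
    #         repo.ui.write(b'M %s\n' % f)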

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
3428
3473
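
    # A minimal sketch of how an extension might use the hook above
    # (hypothetical callback, not part of this module); the callback
    # runs with the wlock held after the next status fixup:
    #
    #     def _fixup(wctx, status):
    #         wctx.repo().ui.note(b'%d modified files\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(_fixup)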

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
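
    # Illustrative sketch (not part of the original module): printing
    # the open heads of the current branch, newest first:
    #
    #     from mercurial.node import short
    #     for h in repo.branchheads():
    #         repo.ui.write(b'%s\n' % short(h))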

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
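
    # The loop above samples first parents at exponentially growing
    # distances from `top` (positions 1, 2, 4, 8, ...), which is what
    # the legacy "between" wire protocol command expects. The same
    # sampling over a plain parent mapping (hypothetical data, not part
    # of this module) looks like:
    #
    #     parents = {5: 4, 4: 3, 3: 2, 2: 1, 1: 0}  # node -> first parent
    #     n, i, f, sampled = 5, 0, 1, []
    #     while n != 0:
    #         if i == f:
    #             sampled.append(n)
    #             f *= 2
    #         n = parents[n]
    #         i += 1
    #     # sampled == [4, 3, 1]  (distances 1, 2, 4 from the top)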

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return the util.hooks of functions called before pushing changesets;
        each hook is passed a pushop carrying repo, remote, and outgoing
        attributes.
        """
        return util.hooks()
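
    # A minimal sketch of an extension hooking in here (hypothetical
    # check, not part of this module); reposetup would register it once
    # per repo:
    #
    #     def _checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort(b'refusing to push more than 100 csets')
    #
    #     repo.prepushoutgoinghooks.add(b'myext', _checkoutgoing)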

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
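
    # A hedged sketch (not from the original module) of moving a
    # bookmark through the pushkey protocol; keys and values are
    # bytes-encoded hex nodes, and `newnode` stands in for a binary
    # node chosen by the caller:
    #
    #     from mercurial.node import hex
    #     old = hex(repo._bookmarks[b'stable'])
    #     ok = repo.pushkey(b'bookmarks', b'stable', old, hex(newnode))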

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
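
    # A hedged sketch of registering a computer (the category name and
    # callback are hypothetical; real callers live in the sidedata
    # helpers). A computer receives (repo, store, rev, sidedata) and
    # returns the new sidedata dict plus (flags to add, flags to drop):
    #
    #     def _computer(repo, store, rev, sidedata):
    #         return {}, (0, 0)
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'my-category',
    #         (b'my-category',),
    #         _computer,
    #         0,
    #     )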


localrepository = interfaceutil.implementer(repository.ilocalrepositorymain)(
    LocalRepository
)

if typing.TYPE_CHECKING:
    # Help pytype by hiding the interface stuff that confuses it.
    localrepository = LocalRepository


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
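

# For instance (illustrative, not part of the original module):
# undoname(b'.hg/store/journal.phaseroots') returns
# b'.hg/store/undo.phaseroots' -- only the first 'journal' is replaced.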


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
        if ui.configbool(b'format', b'sparse-revlog'):
            requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
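

# A hedged sketch of how an extension might wrap the function above to
# add its own requirement (the requirement name is hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myext-requirement')
#         return reqs
#
#     extensions.wrapfunction(
#         localrepo, 'newreporequirements', _newreporequirements
#     )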


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
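

# A sketch of the wrapping pattern the docstring describes (the option
# name is hypothetical): an extension removes the options it knows how
# to handle so repository creation is allowed to proceed:
#
#     def _filterknowncreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myext-option', None)
#         return unknown
#
#     extensions.wrapfunction(
#         localrepo, 'filterknowncreateopts', _filterknowncreateopts
#     )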


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
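

# Illustrative usage (not from the original module; the path is a
# placeholder): creating a fresh repository with the lfs requirement
# enabled from the start:
#
#     from mercurial import localrepo, ui as uimod
#
#     u = uimod.ui.load()
#     localrepo.createrepository(u, b'/tmp/newrepo', createopts={b'lfs': True})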


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
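

# After poisoning, only close() keeps working; every other attribute
# access raises (illustrative, not part of the original module):
#
#     poisonrepository(repo)
#     repo.close()        # fine, intentionally allowed
#     repo.changelog      # raises error.ProgrammingError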