branch: pass current transaction when writing branch for transaction backup...
marmoute - r51160:240a04ce default
@@ -1,3999 +1,3999 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import re
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)
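
# Illustrative sketch (not part of the original source): this pattern matches
# any file name starting with b'dirstate' or b'narrowspec.dirstate', and the
# exact name b'branch', e.g.:
#
#   RE_SKIP_DIRSTATE_ROLLBACK.match(b'dirstate')                     # match
#   RE_SKIP_DIRSTATE_ROLLBACK.match(b'narrowspec.dirstate.pending')  # match
#   RE_SKIP_DIRSTATE_ROLLBACK.match(b'branch')                       # match
#   RE_SKIP_DIRSTATE_ROLLBACK.match(b'branch2')                      # no match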

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
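
# Usage sketch (illustrative, not from the original file): callers can peek at
# a filecache-backed property without forcing it to load, e.g.:
#
#   cl, cached = isfilecached(repo, b'changelog')
#   if cached:
#       ...  # reuse the already-loaded changelog object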


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
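
# Illustrative sketch (not in the original source): a repo method decorated
# this way always sees the unfiltered repository, whichever filtered view it
# is called through:
#
#   class localrepository:
#       @unfilteredmethod
#       def destroyed(self):
#           ...  # here `self` is repo.unfiltered()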


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
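
# Usage sketch (illustrative): command executors are driven as context
# managers and hand back futures, e.g.:
#
#   with peer.commandexecutor() as e:
#       node = e.callcommand(b'lookup', {b'key': b'tip'}).result()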


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None):
        super(localpeer, self).__init__(repo.ui, path=path)

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
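
# Illustrative sketch (not in the original source): an extension registers a
# feature-setup function to declare support for a custom requirement:
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myextension-requirement')  # hypothetical name
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)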


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())
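
# For reference (hedged; exact contents vary by Mercurial version and repo
# format), a .hg/requires file might contain lines such as:
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   sparserevlog
#   store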


def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if the store requirement is not present; refer to
    # checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
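
# Usage note (hedged): callers normally do not invoke this directly; the
# higher-level mercurial.hg.repository() entry point resolves the repo type
# and ends up here for local paths, e.g.:
#
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')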


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
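
# Illustrative sketch (not part of the original file): an extension that pulls
# in an extra per-repo config file could wrap this hook; the b'hgrc-extra'
# file name below is hypothetical:
#
#   from mercurial import extensions, localrepo
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#       ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           ret = True
#       except IOError:
#           pass
#       return ret
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)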


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
948
948
949
949
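# A minimal sketch (assuming the usual extension layout; module and
# requirement names are hypothetical) of how an extension can hook into the
# ``featuresetupfuncs`` mechanism consulted above:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       # advertise an extra requirement this extension knows how to open
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)

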
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


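# Illustrative sketch of how the requirement helpers in this module fit
# together when opening a repository (``requirements`` as read from
# .hg/requires; the exact call sites may differ):
#
#   supported = gathersupportedrequirements(ui)
#   ensurerequirementsrecognized(requirements, supported)
#   ensurerequirementscompatible(ui, requirements)

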
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


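# Illustrative summary of the selection above (requirement set -> store):
#
#   {b'store', b'fncache', b'dotencode'} -> fncachestore (dotencode=True)
#   {b'store', b'fncache'}               -> fncachestore (dotencode=False)
#   {b'store'}                           -> encodedstore
#   {}                                   -> basicstore (very old repos)

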
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


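# For example (illustrative only): for a repository whose requirements
# contain neither the revlogv1 nor the revlogv2 requirement, the returned
# dict carries ``{b'revlogv0': True}`` (plus any treemanifest/copies
# entries) and none of the revlog-specific options resolved below.

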
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
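    # For example, a requirement of b'revlog-compression-zstd' yields
    # b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd' as the engine
    # name.
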
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


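# For example (illustrative only): setting b'storage.revlog.zstd.level'
# to 25 trips the range check above and aborts with
# 'invalid value for `storage.revlog.zstd.level` config: 25'.

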
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


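# A minimal sketch (illustrative, not the actual ``makelocalrepository()``
# code; the name ``derivedrepo`` is hypothetical) of how such a factory
# list can be collapsed into a single derived class:
#
#   bases = tuple(
#       factory()(requirements=requirements, features=features)
#       for iface, factory in REPO_INTERFACES
#   )
#   cls = type('derivedrepo', bases, {})

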
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

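    # For example (illustrative only): with devel.check-locks enabled,
    # writing b'journal.something' through ``self.vfs`` without holding
    # the store lock triggers the develwarn 'write with no lock: "..."'
    # above, while paths matching ``_wlockfreeprefix`` are exempt from
    # the wlock check.
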
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

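    # For example (illustrative only): if ``ctx.substate`` registers a
    # subrepository at b'sub', then ``_checknested`` on b'<root>/sub'
    # returns True directly, while b'<root>/sub/nested' is delegated to
    # ``sub.checknested(b'nested')``.
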
    def peer(self, path=None):
        return localpeer(self, path=path)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

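    # For example (illustrative only): obtaining the view that hides
    # hidden and secret changesets, regardless of the view ``repo``
    # already uses:
    #
    #   served = repo.filtered(b'served')
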
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

1759 @unfilteredpropertycache
1759 @unfilteredpropertycache
1760 def dirstate(self):
1760 def dirstate(self):
1761 if self._dirstate is None:
1761 if self._dirstate is None:
1762 self._dirstate = self._makedirstate()
1762 self._dirstate = self._makedirstate()
1763 else:
1763 else:
1764 self._dirstate.refresh()
1764 self._dirstate.refresh()
1765 return self._dirstate
1765 return self._dirstate
1766
1766
1767 def _makedirstate(self):
1767 def _makedirstate(self):
1768 """Extension point for wrapping the dirstate per-repo."""
1768 """Extension point for wrapping the dirstate per-repo."""
1769 sparsematchfn = None
1769 sparsematchfn = None
1770 if sparse.use_sparse(self):
1770 if sparse.use_sparse(self):
1771 sparsematchfn = lambda: sparse.matcher(self)
1771 sparsematchfn = lambda: sparse.matcher(self)
1772 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1772 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1773 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1773 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1774 use_dirstate_v2 = v2_req in self.requirements
1774 use_dirstate_v2 = v2_req in self.requirements
1775 use_tracked_hint = th in self.requirements
1775 use_tracked_hint = th in self.requirements
1776
1776
1777 return dirstate.dirstate(
1777 return dirstate.dirstate(
1778 self.vfs,
1778 self.vfs,
1779 self.ui,
1779 self.ui,
1780 self.root,
1780 self.root,
1781 self._dirstatevalidate,
1781 self._dirstatevalidate,
1782 sparsematchfn,
1782 sparsematchfn,
1783 self.nodeconstants,
1783 self.nodeconstants,
1784 use_dirstate_v2,
1784 use_dirstate_v2,
1785 use_tracked_hint=use_tracked_hint,
1785 use_tracked_hint=use_tracked_hint,
1786 )
1786 )
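
    # How an extension might use the extension point above (a hedged sketch;
    # `wrapdirstate` and `extsetup` are conventional extension names, not
    # defined in this module):
    #
    #   def wrapdirstate(orig, self):
    #       ds = orig(self)
    #       ds.myext_state = {}  # attach extension state to the dirstate
    #       return ds
    #
    #   def extsetup(ui):
    #       extensions.wrapfunction(
    #           localrepo.localrepository, '_makedirstate', wrapdirstate
    #       )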

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
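
    # Usage sketch (assumed caller code, not defined here): intersecting an
    # explicit file matcher with the narrowspec before walking a manifest:
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), [b'path:src/module'])
    #   m = repo.narrowmatch(m, includeexact=True)
    #   if m(b'src/module/a.py'):
    #       ...  # file is both requested and inside the narrowspec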

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null
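
    # Sketch of what the quick-access table buys us (illustrative values,
    # assuming a repo whose working copy parent is revision 5):
    #
    #   repo._quick_access_changeid[b'.']     # -> (5, <20-byte node>)
    #   repo._quick_access_changeid[b'null']  # -> (-1, nullid)
    #
    # __getitem__ consults this dict first, so `repo[b'.']` resolves without
    # touching the revset machinery or obsolescence markers.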

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
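
    # The changeid forms accepted above, as a quick sketch (hypothetical
    # values; `repo` is any localrepository):
    #
    #   repo[None]         # workingctx
    #   repo[0]            # changectx for revision number 0
    #   repo[b'.']         # working copy parent
    #   repo[b'tip']       # tip of the (filtered) changelog
    #   repo[node]         # 20-byte binary node
    #   repo[b'a1b2...']   # 40-char hex node (full length only)
    #   repo[0:3]          # list of changectx, filtered revs skipped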

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
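
    # %-formatting sketch for ``revs()`` (illustrative; see
    # ``revsetlang.formatspec`` for the full escape list):
    #
    #   repo.revs(b'heads(%ld)', [1, 2, 3])    # %ld: list of int revisions
    #   repo.revs(b'branch(%s)', b'default')   # %s: a bytes string
    #   repo.revs(b'%d::%d', 10, 20)           # %d: a single int revision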

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
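
    # ``anyrevs`` usage sketch (the alias is hypothetical; revisions matching
    # any of the specs are returned):
    #
    #   revs = repo.anyrevs(
    #       [b'mine()', b'draft()'],
    #       user=True,
    #       localalias={b'mine': b'user("alice")'},
    #   )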

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)
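
    # Shape of the ``_findtags()`` return value, sketched with hypothetical
    # entries:
    #
    #   tags     = {b'tip': <node>, b'v1.0': <node>, b'wip': <node>}
    #   tagtypes = {b'v1.0': b'global', b'wip': b'local'}
    #
    # note that b'tip' is synthesized here and never appears in tagtypes.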

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
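
    # The config sections read by ``_loadfilter`` map a file pattern to a
    # filter command; a hedged hgrc sketch modeled on the configuration
    # documentation (``pipe:`` is the default filter driver):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = gzip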

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
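
    # ``flags`` sketch (values as used across Mercurial): b'' regular file,
    # b'l' symlink (data becomes the link target), b'x' executable bit, e.g.:
    #
    #   repo.wwrite(b'bin/run', b'#!/bin/sh\n', b'x')  # executable script
    #   repo.wwrite(b'link', b'target', b'l')          # symlink to 'target'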

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
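
    # Typical calling pattern for ``transaction`` below (a sketch; real
    # callers usually go through higher-level helpers):
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; tr aborts on exception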

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )
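
        # Hook configuration sketch for the pre-close hooks fired above
        # (hgrc snippet; the hook names are real, the scripts hypothetical):
        #
        #   [hooks]
        #   pretxnclose = /path/to/check-txn.sh
        #   pretxnclose-bookmark.audit = /path/to/check-bookmark.sh
        #
        # a non-zero exit from a pretxnclose* hook aborts the transaction.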
2544
2544
2545 def releasefn(tr, success):
2545 def releasefn(tr, success):
2546 repo = reporef()
2546 repo = reporef()
2547 if repo is None:
2547 if repo is None:
2548 # If the repo has been GC'd (and this release function is being
2548 # If the repo has been GC'd (and this release function is being
2549 # called from transaction.__del__), there's not much we can do,
2549 # called from transaction.__del__), there's not much we can do,
2550 # so just leave the unfinished transaction there and let the
2550 # so just leave the unfinished transaction there and let the
2551 # user run `hg recover`.
2551 # user run `hg recover`.
2552 return
2552 return
2553 if success:
2553 if success:
2554 # this should be explicitly invoked here, because
2554 # this should be explicitly invoked here, because
2555 # in-memory changes aren't written out at closing
2555 # in-memory changes aren't written out at closing
2556 # transaction, if tr.addfilegenerator (via
2556 # transaction, if tr.addfilegenerator (via
2557 # dirstate.write or so) isn't invoked while
2557 # dirstate.write or so) isn't invoked while
2558 # transaction running
2558 # transaction running
2559 repo.dirstate.write(None)
2559 repo.dirstate.write(None)
2560 else:
2560 else:
2561 # discard all changes (including ones already written
2561 # discard all changes (including ones already written
2562 # out) in this transaction
2562 # out) in this transaction
2563 repo.invalidate(clearfilecache=True)
2563 repo.invalidate(clearfilecache=True)
2564
2564
2565 tr = transaction.transaction(
2565 tr = transaction.transaction(
2566 rp,
2566 rp,
2567 self.svfs,
2567 self.svfs,
2568 vfsmap,
2568 vfsmap,
2569 b"journal",
2569 b"journal",
2570 b"undo",
2570 b"undo",
2571 aftertrans(renames),
2571 aftertrans(renames),
2572 self.store.createmode,
2572 self.store.createmode,
2573 validator=validate,
2573 validator=validate,
2574 releasefn=releasefn,
2574 releasefn=releasefn,
2575 checkambigfiles=_cachedfiles,
2575 checkambigfiles=_cachedfiles,
2576 name=desc,
2576 name=desc,
2577 )
2577 )
2578 tr.changes[b'origrepolen'] = len(self)
2578 tr.changes[b'origrepolen'] = len(self)
2579 tr.changes[b'obsmarkers'] = set()
2579 tr.changes[b'obsmarkers'] = set()
2580 tr.changes[b'phases'] = []
2580 tr.changes[b'phases'] = []
2581 tr.changes[b'bookmarks'] = {}
2581 tr.changes[b'bookmarks'] = {}
2582
2582
2583 tr.hookargs[b'txnid'] = txnid
2583 tr.hookargs[b'txnid'] = txnid
2584 tr.hookargs[b'txnname'] = desc
2584 tr.hookargs[b'txnname'] = desc
2585 tr.hookargs[b'changes'] = tr.changes
2585 tr.hookargs[b'changes'] = tr.changes
2586 # note: writing the fncache only during finalize means that the file is
2586 # note: writing the fncache only during finalize means that the file is
2587 # outdated when running hooks. As fncache is used for streaming clone,
2587 # outdated when running hooks. As fncache is used for streaming clone,
2588 # this is not expected to break anything that happens during the hooks.
2588 # this is not expected to break anything that happens during the hooks.
2589 tr.addfinalize(b'flush-fncache', self.store.write)
2589 tr.addfinalize(b'flush-fncache', self.store.write)
2590
2590
2591 def txnclosehook(tr2):
2591 def txnclosehook(tr2):
2592 """To be run if transaction is successful, will schedule a hook run"""
2592 """To be run if transaction is successful, will schedule a hook run"""
2593 # Don't reference tr2 in hook() so we don't hold a reference.
2593 # Don't reference tr2 in hook() so we don't hold a reference.
2594 # This reduces memory consumption when there are multiple
2594 # This reduces memory consumption when there are multiple
2595 # transactions per lock. This can likely go away if issue5045
2595 # transactions per lock. This can likely go away if issue5045
2596 # fixes the function accumulation.
2596 # fixes the function accumulation.
2597 hookargs = tr2.hookargs
2597 hookargs = tr2.hookargs
2598
2598
2599 def hookfunc(unused_success):
2599 def hookfunc(unused_success):
2600 repo = reporef()
2600 repo = reporef()
2601 assert repo is not None # help pytype
2601 assert repo is not None # help pytype
2602
2602
2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2605 for name, (old, new) in bmchanges:
2605 for name, (old, new) in bmchanges:
2606 args = tr.hookargs.copy()
2606 args = tr.hookargs.copy()
2607 args.update(bookmarks.preparehookargs(name, old, new))
2607 args.update(bookmarks.preparehookargs(name, old, new))
2608 repo.hook(
2608 repo.hook(
2609 b'txnclose-bookmark',
2609 b'txnclose-bookmark',
2610 throw=False,
2610 throw=False,
2611 **pycompat.strkwargs(args)
2611 **pycompat.strkwargs(args)
2612 )
2612 )
2613
2613
2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2615 cl = repo.unfiltered().changelog
2615 cl = repo.unfiltered().changelog
2616 phasemv = sorted(
2616 phasemv = sorted(
2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2618 )
2618 )
2619 for revs, (old, new) in phasemv:
2619 for revs, (old, new) in phasemv:
2620 for rev in revs:
2620 for rev in revs:
2621 args = tr.hookargs.copy()
2621 args = tr.hookargs.copy()
2622 node = hex(cl.node(rev))
2622 node = hex(cl.node(rev))
2623 args.update(phases.preparehookargs(node, old, new))
2623 args.update(phases.preparehookargs(node, old, new))
2624 repo.hook(
2624 repo.hook(
2625 b'txnclose-phase',
2625 b'txnclose-phase',
2626 throw=False,
2626 throw=False,
2627 **pycompat.strkwargs(args)
2627 **pycompat.strkwargs(args)
2628 )
2628 )
2629
2629
2630 repo.hook(
2630 repo.hook(
2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2632 )
2632 )
2633
2633
2634 repo = reporef()
2634 repo = reporef()
2635 assert repo is not None # help pytype
2635 assert repo is not None # help pytype
2636 repo._afterlock(hookfunc)
2636 repo._afterlock(hookfunc)
2637
2637
2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2639 # Include a leading "-" to make it happen before the transaction summary
2639 # Include a leading "-" to make it happen before the transaction summary
2640 # reports registered via scmutil.registersummarycallback() whose names
2640 # reports registered via scmutil.registersummarycallback() whose names
2641 # are 00-txnreport etc. That way, the caches will be warm when the
2641 # are 00-txnreport etc. That way, the caches will be warm when the
2642 # callbacks run.
2642 # callbacks run.
2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2644
2644
2645 def txnaborthook(tr2):
2645 def txnaborthook(tr2):
2646 """To be run if transaction is aborted"""
2646 """To be run if transaction is aborted"""
2647 repo = reporef()
2647 repo = reporef()
2648 assert repo is not None # help pytype
2648 assert repo is not None # help pytype
2649 repo.hook(
2649 repo.hook(
2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2651 )
2651 )
2652
2652
2653 tr.addabort(b'txnabort-hook', txnaborthook)
2653 tr.addabort(b'txnabort-hook', txnaborthook)
2654 # avoid eager cache invalidation. in-memory data should be identical
2654 # avoid eager cache invalidation. in-memory data should be identical
2655 # to stored data if transaction has no error.
2655 # to stored data if transaction has no error.
2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2657 self._transref = weakref.ref(tr)
2657 self._transref = weakref.ref(tr)
2658 scmutil.registersummarycallback(self, tr, desc)
2658 scmutil.registersummarycallback(self, tr, desc)
2659 # This only exists to deal with rollback's need to have viable
2659 # This only exists to deal with rollback's need to have viable
2660 # parents at the end of the operation. So we back up viable parents at
2660 # parents at the end of the operation. So we back up viable parents at
2661 # the time of this operation.
2661 # the time of this operation.
2662 #
2662 #
2663 # We only do it when the `wlock` is taken, otherwise others might be
2663 # We only do it when the `wlock` is taken, otherwise others might be
2664 # altering the dirstate under us.
2664 # altering the dirstate under us.
2665 #
2665 #
2666 # This is really not a great way to do this (first, because we cannot
2666 # This is really not a great way to do this (first, because we cannot
2667 # always do it). More viable alternatives exist:
2667 # always do it). More viable alternatives exist:
2668 #
2668 #
2669 # - backing up only the working copy parents in a dedicated file and
2669 # - backing up only the working copy parents in a dedicated file and
2670 # doing a clean "keep-update" to them on `hg rollback`.
2670 # doing a clean "keep-update" to them on `hg rollback`.
2671 #
2671 #
2672 # - slightly changing the behavior and applying logic similar to "hg
2672 # - slightly changing the behavior and applying logic similar to "hg
2673 # strip" to pick a working copy destination on `hg rollback`
2673 # strip" to pick a working copy destination on `hg rollback`
2674 if self.currentwlock() is not None:
2674 if self.currentwlock() is not None:
2675 ds = self.dirstate
2675 ds = self.dirstate
2676 if ds.branch() == b'default':
2676 if ds.branch() == b'default':
2677 # force a file to be written if none exists
2677 # force a file to be written if none exists
2678 ds.setbranch(b'default')
2678 ds.setbranch(b'default', None)
2679 # we cannot simply add "branch" to `all_file_names` because branch
2679 # we cannot simply add "branch" to `all_file_names` because branch
2680 # is written outside of transaction control. So we need to back it
2680 # is written outside of transaction control. So we need to back it
2681 # up early.
2681 # up early.
2682 tr.addbackup(b"branch", hardlink=True, location=b'plain')
2682 tr.addbackup(b"branch", hardlink=True, location=b'plain')
2683
2683
2684 def backup_dirstate(tr):
2684 def backup_dirstate(tr):
2685 for f in ds.all_file_names():
2685 for f in ds.all_file_names():
2686 # hardlink backup is okay because `dirstate` is always
2686 # hardlink backup is okay because `dirstate` is always
2687 # atomically written and possible data files are append-only
2687 # atomically written and possible data files are append-only
2688 # and resistant to trailing data.
2688 # and resistant to trailing data.
2689 tr.addbackup(f, hardlink=True, location=b'plain')
2689 tr.addbackup(f, hardlink=True, location=b'plain')
2690
2690
2691 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2691 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2692 return tr
2692 return tr
2693
2693
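# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# The transaction built above exposes `txnid`/`txnname` through
# `tr.hookargs`, so an in-process `pretxnclose` hook can inspect them.
# A minimal sketch, assuming a hypothetical `myhooks` module enabled via:
#   [hooks]
#   pretxnclose.forbid-commit = python:myhooks.forbid_commit
def forbid_commit(ui, repo, hooktype, txnname=b'', **kwargs):
    # commit transactions are opened as self.transaction(b'commit') below
    if txnname == b'commit':
        ui.warn(b'commits are disabled on this repository\n')
        return True  # a truthy return value makes a pre-hook fail
    return False
# -------------------------------------------------------------------------
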
2694 def _journalfiles(self):
2694 def _journalfiles(self):
2695 return (
2695 return (
2696 (self.svfs, b'journal'),
2696 (self.svfs, b'journal'),
2697 (self.vfs, b'journal.desc'),
2697 (self.vfs, b'journal.desc'),
2698 )
2698 )
2699
2699
2700 def undofiles(self):
2700 def undofiles(self):
2701 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2701 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2702
2702
2703 @unfilteredmethod
2703 @unfilteredmethod
2704 def _writejournal(self, desc):
2704 def _writejournal(self, desc):
2705 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2705 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2706
2706
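# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# `journal.desc` (renamed to `undo.desc` when the transaction succeeds)
# holds the repository length at transaction start followed by the
# transaction description, mirroring the parsing in `_rollback` below:
def read_journal_desc(repo):
    # assumes an open localrepository with a pending journal
    args = repo.vfs.read(b'journal.desc').splitlines()
    return int(args[0]), args[1]
# -------------------------------------------------------------------------
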
2707 def recover(self):
2707 def recover(self):
2708 with self.lock():
2708 with self.lock():
2709 if self.svfs.exists(b"journal"):
2709 if self.svfs.exists(b"journal"):
2710 self.ui.status(_(b"rolling back interrupted transaction\n"))
2710 self.ui.status(_(b"rolling back interrupted transaction\n"))
2711 vfsmap = {
2711 vfsmap = {
2712 b'': self.svfs,
2712 b'': self.svfs,
2713 b'plain': self.vfs,
2713 b'plain': self.vfs,
2714 }
2714 }
2715 transaction.rollback(
2715 transaction.rollback(
2716 self.svfs,
2716 self.svfs,
2717 vfsmap,
2717 vfsmap,
2718 b"journal",
2718 b"journal",
2719 self.ui.warn,
2719 self.ui.warn,
2720 checkambigfiles=_cachedfiles,
2720 checkambigfiles=_cachedfiles,
2721 )
2721 )
2722 self.invalidate()
2722 self.invalidate()
2723 return True
2723 return True
2724 else:
2724 else:
2725 self.ui.warn(_(b"no interrupted transaction available\n"))
2725 self.ui.warn(_(b"no interrupted transaction available\n"))
2726 return False
2726 return False
2727
2727
2728 def rollback(self, dryrun=False, force=False):
2728 def rollback(self, dryrun=False, force=False):
2729 wlock = lock = None
2729 wlock = lock = None
2730 try:
2730 try:
2731 wlock = self.wlock()
2731 wlock = self.wlock()
2732 lock = self.lock()
2732 lock = self.lock()
2733 if self.svfs.exists(b"undo"):
2733 if self.svfs.exists(b"undo"):
2734 return self._rollback(dryrun, force)
2734 return self._rollback(dryrun, force)
2735 else:
2735 else:
2736 self.ui.warn(_(b"no rollback information available\n"))
2736 self.ui.warn(_(b"no rollback information available\n"))
2737 return 1
2737 return 1
2738 finally:
2738 finally:
2739 release(lock, wlock)
2739 release(lock, wlock)
2740
2740
2741 @unfilteredmethod # Until we get smarter cache management
2741 @unfilteredmethod # Until we get smarter cache management
2742 def _rollback(self, dryrun, force):
2742 def _rollback(self, dryrun, force):
2743 ui = self.ui
2743 ui = self.ui
2744
2744
2745 parents = self.dirstate.parents()
2745 parents = self.dirstate.parents()
2746 try:
2746 try:
2747 args = self.vfs.read(b'undo.desc').splitlines()
2747 args = self.vfs.read(b'undo.desc').splitlines()
2748 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2748 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2749 if len(args) >= 3:
2749 if len(args) >= 3:
2750 detail = args[2]
2750 detail = args[2]
2751 oldtip = oldlen - 1
2751 oldtip = oldlen - 1
2752
2752
2753 if detail and ui.verbose:
2753 if detail and ui.verbose:
2754 msg = _(
2754 msg = _(
2755 b'repository tip rolled back to revision %d'
2755 b'repository tip rolled back to revision %d'
2756 b' (undo %s: %s)\n'
2756 b' (undo %s: %s)\n'
2757 ) % (oldtip, desc, detail)
2757 ) % (oldtip, desc, detail)
2758 else:
2758 else:
2759 msg = _(
2759 msg = _(
2760 b'repository tip rolled back to revision %d (undo %s)\n'
2760 b'repository tip rolled back to revision %d (undo %s)\n'
2761 ) % (oldtip, desc)
2761 ) % (oldtip, desc)
2762 parentgone = any(self[p].rev() > oldtip for p in parents)
2762 parentgone = any(self[p].rev() > oldtip for p in parents)
2763 except IOError:
2763 except IOError:
2764 msg = _(b'rolling back unknown transaction\n')
2764 msg = _(b'rolling back unknown transaction\n')
2765 desc = None
2765 desc = None
2766 parentgone = True
2766 parentgone = True
2767
2767
2768 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2768 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2769 raise error.Abort(
2769 raise error.Abort(
2770 _(
2770 _(
2771 b'rollback of last commit while not checked out '
2771 b'rollback of last commit while not checked out '
2772 b'may lose data'
2772 b'may lose data'
2773 ),
2773 ),
2774 hint=_(b'use -f to force'),
2774 hint=_(b'use -f to force'),
2775 )
2775 )
2776
2776
2777 ui.status(msg)
2777 ui.status(msg)
2778 if dryrun:
2778 if dryrun:
2779 return 0
2779 return 0
2780
2780
2781 self.destroying()
2781 self.destroying()
2782 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2782 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2783 skip_journal_pattern = None
2783 skip_journal_pattern = None
2784 if not parentgone:
2784 if not parentgone:
2785 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2785 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2786 transaction.rollback(
2786 transaction.rollback(
2787 self.svfs,
2787 self.svfs,
2788 vfsmap,
2788 vfsmap,
2789 b'undo',
2789 b'undo',
2790 ui.warn,
2790 ui.warn,
2791 checkambigfiles=_cachedfiles,
2791 checkambigfiles=_cachedfiles,
2792 skip_journal_pattern=skip_journal_pattern,
2792 skip_journal_pattern=skip_journal_pattern,
2793 )
2793 )
2794 self.invalidate()
2794 self.invalidate()
2795 self.dirstate.invalidate()
2795 self.dirstate.invalidate()
2796
2796
2797 if parentgone:
2797 if parentgone:
2798 # replace this with some explicit parent update in the future.
2798 # replace this with some explicit parent update in the future.
2799 has_node = self.changelog.index.has_node
2799 has_node = self.changelog.index.has_node
2800 if not all(has_node(p) for p in self.dirstate._pl):
2800 if not all(has_node(p) for p in self.dirstate._pl):
2801 # There was no dirstate to back up initially, so we need to drop
2801 # There was no dirstate to back up initially, so we need to drop
2802 # the existing one.
2802 # the existing one.
2803 with self.dirstate.changing_parents(self):
2803 with self.dirstate.changing_parents(self):
2804 self.dirstate.setparents(self.nullid)
2804 self.dirstate.setparents(self.nullid)
2805 self.dirstate.clear()
2805 self.dirstate.clear()
2806
2806
2807 parents = tuple([p.rev() for p in self[None].parents()])
2807 parents = tuple([p.rev() for p in self[None].parents()])
2808 if len(parents) > 1:
2808 if len(parents) > 1:
2809 ui.status(
2809 ui.status(
2810 _(
2810 _(
2811 b'working directory now based on '
2811 b'working directory now based on '
2812 b'revisions %d and %d\n'
2812 b'revisions %d and %d\n'
2813 )
2813 )
2814 % parents
2814 % parents
2815 )
2815 )
2816 else:
2816 else:
2817 ui.status(
2817 ui.status(
2818 _(b'working directory now based on revision %d\n') % parents
2818 _(b'working directory now based on revision %d\n') % parents
2819 )
2819 )
2820 mergestatemod.mergestate.clean(self)
2820 mergestatemod.mergestate.clean(self)
2821
2821
2822 # TODO: if we know which new heads may result from this rollback, pass
2822 # TODO: if we know which new heads may result from this rollback, pass
2823 # them to destroy(), which will prevent the branchhead cache from being
2823 # them to destroy(), which will prevent the branchhead cache from being
2824 # invalidated.
2824 # invalidated.
2825 self.destroyed()
2825 self.destroyed()
2826 return 0
2826 return 0
2827
2827
2828 def _buildcacheupdater(self, newtransaction):
2828 def _buildcacheupdater(self, newtransaction):
2829 """called during transaction to build the callback updating cache
2829 """called during transaction to build the callback updating cache
2830
2830
2831 Lives on the repository to help extension who might want to augment
2831 Lives on the repository to help extension who might want to augment
2832 this logic. For this purpose, the created transaction is passed to the
2832 this logic. For this purpose, the created transaction is passed to the
2833 method.
2833 method.
2834 """
2834 """
2835 # we must avoid cyclic reference between repo and transaction.
2835 # we must avoid cyclic reference between repo and transaction.
2836 reporef = weakref.ref(self)
2836 reporef = weakref.ref(self)
2837
2837
2838 def updater(tr):
2838 def updater(tr):
2839 repo = reporef()
2839 repo = reporef()
2840 assert repo is not None # help pytype
2840 assert repo is not None # help pytype
2841 repo.updatecaches(tr)
2841 repo.updatecaches(tr)
2842
2842
2843 return updater
2843 return updater
2844
2844
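# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# The docstring above invites extensions to augment the cache updater. A
# sketch of doing so with extensions.wrapfunction (the extra debug line
# is hypothetical):
from mercurial import extensions, localrepo

def _wrapped_updater(orig, self, newtransaction):
    updater = orig(self, newtransaction)

    def extended(tr):
        updater(tr)
        self.ui.debug(b'example-extension: caches updated\n')

    return extended

def uisetup(ui):
    extensions.wrapfunction(
        localrepo.localrepository, '_buildcacheupdater', _wrapped_updater
    )
# -------------------------------------------------------------------------
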
2845 @unfilteredmethod
2845 @unfilteredmethod
2846 def updatecaches(self, tr=None, full=False, caches=None):
2846 def updatecaches(self, tr=None, full=False, caches=None):
2847 """warm appropriate caches
2847 """warm appropriate caches
2848
2848
2849 If this function is called after a transaction has closed, the transaction
2849 If this function is called after a transaction has closed, the transaction
2850 will be available in the 'tr' argument. This can be used to selectively
2850 will be available in the 'tr' argument. This can be used to selectively
2851 update caches relevant to the changes in that transaction.
2851 update caches relevant to the changes in that transaction.
2852
2852
2853 If 'full' is set, make sure all caches the function knows about have
2853 If 'full' is set, make sure all caches the function knows about have
2854 up-to-date data, even the ones usually loaded more lazily.
2854 up-to-date data, even the ones usually loaded more lazily.
2855
2855
2856 The `full` argument can take a special "post-clone" value. In this case
2856 The `full` argument can take a special "post-clone" value. In this case
2857 the cache warming is done after a clone, and some of the slower caches
2857 the cache warming is done after a clone, and some of the slower caches
2858 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2858 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2859 as we plan for a cleaner way to deal with this in 5.9.
2859 as we plan for a cleaner way to deal with this in 5.9.
2860 """
2860 """
2861 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2861 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2862 # During strip, many caches are invalid but
2862 # During strip, many caches are invalid but
2863 # later call to `destroyed` will refresh them.
2863 # later call to `destroyed` will refresh them.
2864 return
2864 return
2865
2865
2866 unfi = self.unfiltered()
2866 unfi = self.unfiltered()
2867
2867
2868 if full:
2868 if full:
2869 msg = (
2869 msg = (
2870 "`full` argument for `repo.updatecaches` is deprecated\n"
2870 "`full` argument for `repo.updatecaches` is deprecated\n"
2871 "(use `caches=repository.CACHE_ALL` instead)"
2871 "(use `caches=repository.CACHE_ALL` instead)"
2872 )
2872 )
2873 self.ui.deprecwarn(msg, b"5.9")
2873 self.ui.deprecwarn(msg, b"5.9")
2874 caches = repository.CACHES_ALL
2874 caches = repository.CACHES_ALL
2875 if full == b"post-clone":
2875 if full == b"post-clone":
2876 caches = repository.CACHES_POST_CLONE
2876 caches = repository.CACHES_POST_CLONE
2877 caches = repository.CACHES_ALL
2877 caches = repository.CACHES_ALL
2878 elif caches is None:
2878 elif caches is None:
2879 caches = repository.CACHES_DEFAULT
2879 caches = repository.CACHES_DEFAULT
2880
2880
2881 if repository.CACHE_BRANCHMAP_SERVED in caches:
2881 if repository.CACHE_BRANCHMAP_SERVED in caches:
2882 if tr is None or tr.changes[b'origrepolen'] < len(self):
2882 if tr is None or tr.changes[b'origrepolen'] < len(self):
2883 # accessing the 'served' branchmap should refresh all the others,
2883 # accessing the 'served' branchmap should refresh all the others,
2884 self.ui.debug(b'updating the branch cache\n')
2884 self.ui.debug(b'updating the branch cache\n')
2885 self.filtered(b'served').branchmap()
2885 self.filtered(b'served').branchmap()
2886 self.filtered(b'served.hidden').branchmap()
2886 self.filtered(b'served.hidden').branchmap()
2887 # flush all possibly delayed write.
2887 # flush all possibly delayed write.
2888 self._branchcaches.write_delayed(self)
2888 self._branchcaches.write_delayed(self)
2889
2889
2890 if repository.CACHE_CHANGELOG_CACHE in caches:
2890 if repository.CACHE_CHANGELOG_CACHE in caches:
2891 self.changelog.update_caches(transaction=tr)
2891 self.changelog.update_caches(transaction=tr)
2892
2892
2893 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2893 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2894 self.manifestlog.update_caches(transaction=tr)
2894 self.manifestlog.update_caches(transaction=tr)
2895
2895
2896 if repository.CACHE_REV_BRANCH in caches:
2896 if repository.CACHE_REV_BRANCH in caches:
2897 rbc = unfi.revbranchcache()
2897 rbc = unfi.revbranchcache()
2898 for r in unfi.changelog:
2898 for r in unfi.changelog:
2899 rbc.branchinfo(r)
2899 rbc.branchinfo(r)
2900 rbc.write()
2900 rbc.write()
2901
2901
2902 if repository.CACHE_FULL_MANIFEST in caches:
2902 if repository.CACHE_FULL_MANIFEST in caches:
2903 # ensure the working copy parents are in the manifestfulltextcache
2903 # ensure the working copy parents are in the manifestfulltextcache
2904 for ctx in self[b'.'].parents():
2904 for ctx in self[b'.'].parents():
2905 ctx.manifest() # accessing the manifest is enough
2905 ctx.manifest() # accessing the manifest is enough
2906
2906
2907 if repository.CACHE_FILE_NODE_TAGS in caches:
2907 if repository.CACHE_FILE_NODE_TAGS in caches:
2908 # accessing fnode cache warms the cache
2908 # accessing fnode cache warms the cache
2909 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2909 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2910
2910
2911 if repository.CACHE_TAGS_DEFAULT in caches:
2911 if repository.CACHE_TAGS_DEFAULT in caches:
2912 # accessing tags warms the cache
2912 # accessing tags warms the cache
2913 self.tags()
2913 self.tags()
2914 if repository.CACHE_TAGS_SERVED in caches:
2914 if repository.CACHE_TAGS_SERVED in caches:
2915 self.filtered(b'served').tags()
2915 self.filtered(b'served').tags()
2916
2916
2917 if repository.CACHE_BRANCHMAP_ALL in caches:
2917 if repository.CACHE_BRANCHMAP_ALL in caches:
2918 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2918 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2919 # so we're forcing a write to cause these caches to be warmed up
2919 # so we're forcing a write to cause these caches to be warmed up
2920 # even if they haven't explicitly been requested yet (if they've
2920 # even if they haven't explicitly been requested yet (if they've
2921 # never been used by hg, they won't ever have been written, even if
2921 # never been used by hg, they won't ever have been written, even if
2922 # they're a subset of another kind of cache that *has* been used).
2922 # they're a subset of another kind of cache that *has* been used).
2923 for filt in repoview.filtertable.keys():
2923 for filt in repoview.filtertable.keys():
2924 filtered = self.filtered(filt)
2924 filtered = self.filtered(filt)
2925 filtered.branchmap().write(filtered)
2925 filtered.branchmap().write(filtered)
2926
2926
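# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# Warming every cache explicitly, e.g. from an extension, using the
# constants consumed above instead of the deprecated `full=True`:
from mercurial.interfaces import repository

def warm_all_caches(repo):
    with repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)
# -------------------------------------------------------------------------
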
2927 def invalidatecaches(self):
2927 def invalidatecaches(self):
2928 if '_tagscache' in vars(self):
2928 if '_tagscache' in vars(self):
2929 # can't use delattr on proxy
2929 # can't use delattr on proxy
2930 del self.__dict__['_tagscache']
2930 del self.__dict__['_tagscache']
2931
2931
2932 self._branchcaches.clear()
2932 self._branchcaches.clear()
2933 self.invalidatevolatilesets()
2933 self.invalidatevolatilesets()
2934 self._sparsesignaturecache.clear()
2934 self._sparsesignaturecache.clear()
2935
2935
2936 def invalidatevolatilesets(self):
2936 def invalidatevolatilesets(self):
2937 self.filteredrevcache.clear()
2937 self.filteredrevcache.clear()
2938 obsolete.clearobscaches(self)
2938 obsolete.clearobscaches(self)
2939 self._quick_access_changeid_invalidate()
2939 self._quick_access_changeid_invalidate()
2940
2940
2941 def invalidatedirstate(self):
2941 def invalidatedirstate(self):
2942 """Invalidates the dirstate, causing the next call to dirstate
2942 """Invalidates the dirstate, causing the next call to dirstate
2943 to check if it was modified since the last time it was read,
2943 to check if it was modified since the last time it was read,
2944 rereading it if it has.
2944 rereading it if it has.
2945
2945
2946 This is different from dirstate.invalidate() in that it doesn't always
2946 This is different from dirstate.invalidate() in that it doesn't always
2947 reread the dirstate. Use dirstate.invalidate() if you want to
2947 reread the dirstate. Use dirstate.invalidate() if you want to
2948 explicitly read the dirstate again (i.e. restoring it to a previous
2948 explicitly read the dirstate again (i.e. restoring it to a previous
2949 known good state)."""
2949 known good state)."""
2950 unfi = self.unfiltered()
2950 unfi = self.unfiltered()
2951 if 'dirstate' in unfi.__dict__:
2951 if 'dirstate' in unfi.__dict__:
2952 del unfi.__dict__['dirstate']
2952 del unfi.__dict__['dirstate']
2953
2953
2954 def invalidate(self, clearfilecache=False):
2954 def invalidate(self, clearfilecache=False):
2955 """Invalidates both store and non-store parts other than dirstate
2955 """Invalidates both store and non-store parts other than dirstate
2956
2956
2957 If a transaction is running, invalidation of store is omitted,
2957 If a transaction is running, invalidation of store is omitted,
2958 because discarding in-memory changes might cause inconsistency
2958 because discarding in-memory changes might cause inconsistency
2959 (e.g. incomplete fncache causes unintentional failure, but
2959 (e.g. incomplete fncache causes unintentional failure, but
2960 redundant one doesn't).
2960 redundant one doesn't).
2961 """
2961 """
2962 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2962 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2963 for k in list(self._filecache.keys()):
2963 for k in list(self._filecache.keys()):
2964 if (
2964 if (
2965 k == b'changelog'
2965 k == b'changelog'
2966 and self.currenttransaction()
2966 and self.currenttransaction()
2967 and self.changelog._delayed
2967 and self.changelog._delayed
2968 ):
2968 ):
2969 # The changelog object may store unwritten revisions. We don't
2969 # The changelog object may store unwritten revisions. We don't
2970 # want to lose them.
2970 # want to lose them.
2971 # TODO: Solve the problem instead of working around it.
2971 # TODO: Solve the problem instead of working around it.
2972 continue
2972 continue
2973
2973
2974 if clearfilecache:
2974 if clearfilecache:
2975 del self._filecache[k]
2975 del self._filecache[k]
2976 try:
2976 try:
2977 delattr(unfiltered, k)
2977 delattr(unfiltered, k)
2978 except AttributeError:
2978 except AttributeError:
2979 pass
2979 pass
2980 self.invalidatecaches()
2980 self.invalidatecaches()
2981 if not self.currenttransaction():
2981 if not self.currenttransaction():
2982 # TODO: Changing contents of store outside transaction
2982 # TODO: Changing contents of store outside transaction
2983 # causes inconsistency. We should make in-memory store
2983 # causes inconsistency. We should make in-memory store
2984 # changes detectable, and abort if changed.
2984 # changes detectable, and abort if changed.
2985 self.store.invalidatecaches()
2985 self.store.invalidatecaches()
2986
2986
2987 def invalidateall(self):
2987 def invalidateall(self):
2988 """Fully invalidates both store and non-store parts, causing the
2988 """Fully invalidates both store and non-store parts, causing the
2989 subsequent operation to reread any outside changes."""
2989 subsequent operation to reread any outside changes."""
2990 # extension should hook this to invalidate its caches
2990 # extension should hook this to invalidate its caches
2991 self.invalidate()
2991 self.invalidate()
2992 self.invalidatedirstate()
2992 self.invalidatedirstate()
2993
2993
2994 @unfilteredmethod
2994 @unfilteredmethod
2995 def _refreshfilecachestats(self, tr):
2995 def _refreshfilecachestats(self, tr):
2996 """Reload stats of cached files so that they are flagged as valid"""
2996 """Reload stats of cached files so that they are flagged as valid"""
2997 for k, ce in self._filecache.items():
2997 for k, ce in self._filecache.items():
2998 k = pycompat.sysstr(k)
2998 k = pycompat.sysstr(k)
2999 if k == 'dirstate' or k not in self.__dict__:
2999 if k == 'dirstate' or k not in self.__dict__:
3000 continue
3000 continue
3001 ce.refresh()
3001 ce.refresh()
3002
3002
3003 def _lock(
3003 def _lock(
3004 self,
3004 self,
3005 vfs,
3005 vfs,
3006 lockname,
3006 lockname,
3007 wait,
3007 wait,
3008 releasefn,
3008 releasefn,
3009 acquirefn,
3009 acquirefn,
3010 desc,
3010 desc,
3011 ):
3011 ):
3012 timeout = 0
3012 timeout = 0
3013 warntimeout = 0
3013 warntimeout = 0
3014 if wait:
3014 if wait:
3015 timeout = self.ui.configint(b"ui", b"timeout")
3015 timeout = self.ui.configint(b"ui", b"timeout")
3016 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3016 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3017 # internal config: ui.signal-safe-lock
3017 # internal config: ui.signal-safe-lock
3018 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3018 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3019
3019
3020 l = lockmod.trylock(
3020 l = lockmod.trylock(
3021 self.ui,
3021 self.ui,
3022 vfs,
3022 vfs,
3023 lockname,
3023 lockname,
3024 timeout,
3024 timeout,
3025 warntimeout,
3025 warntimeout,
3026 releasefn=releasefn,
3026 releasefn=releasefn,
3027 acquirefn=acquirefn,
3027 acquirefn=acquirefn,
3028 desc=desc,
3028 desc=desc,
3029 signalsafe=signalsafe,
3029 signalsafe=signalsafe,
3030 )
3030 )
3031 return l
3031 return l
3032
3032
3033 def _afterlock(self, callback):
3033 def _afterlock(self, callback):
3034 """add a callback to be run when the repository is fully unlocked
3034 """add a callback to be run when the repository is fully unlocked
3035
3035
3036 The callback will be executed when the outermost lock is released
3036 The callback will be executed when the outermost lock is released
3037 (with wlock being higher level than 'lock')."""
3037 (with wlock being higher level than 'lock')."""
3038 for ref in (self._wlockref, self._lockref):
3038 for ref in (self._wlockref, self._lockref):
3039 l = ref and ref()
3039 l = ref and ref()
3040 if l and l.held:
3040 if l and l.held:
3041 l.postrelease.append(callback)
3041 l.postrelease.append(callback)
3042 break
3042 break
3043 else: # no lock has been found.
3043 else: # no lock has been found.
3044 callback(True)
3044 callback(True)
3045
3045
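# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# Sketch of registering an after-lock callback: like `hookfunc` above, it
# receives one boolean telling whether the locked operation succeeded,
# and it runs immediately (with True) when no lock is currently held.
def log_when_unlocked(repo):
    def callback(success):
        if success:
            repo.ui.debug(b'repository fully unlocked\n')

    repo._afterlock(callback)
# -------------------------------------------------------------------------
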
3046 def lock(self, wait=True):
3046 def lock(self, wait=True):
3047 """Lock the repository store (.hg/store) and return a weak reference
3047 """Lock the repository store (.hg/store) and return a weak reference
3048 to the lock. Use this before modifying the store (e.g. committing or
3048 to the lock. Use this before modifying the store (e.g. committing or
3049 stripping). If you are opening a transaction, get a lock as well.
3049 stripping). If you are opening a transaction, get a lock as well.
3050
3050
3051 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3051 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3052 'wlock' first to avoid a dead-lock hazard."""
3052 'wlock' first to avoid a dead-lock hazard."""
3053 l = self._currentlock(self._lockref)
3053 l = self._currentlock(self._lockref)
3054 if l is not None:
3054 if l is not None:
3055 l.lock()
3055 l.lock()
3056 return l
3056 return l
3057
3057
3058 l = self._lock(
3058 l = self._lock(
3059 vfs=self.svfs,
3059 vfs=self.svfs,
3060 lockname=b"lock",
3060 lockname=b"lock",
3061 wait=wait,
3061 wait=wait,
3062 releasefn=None,
3062 releasefn=None,
3063 acquirefn=self.invalidate,
3063 acquirefn=self.invalidate,
3064 desc=_(b'repository %s') % self.origroot,
3064 desc=_(b'repository %s') % self.origroot,
3065 )
3065 )
3066 self._lockref = weakref.ref(l)
3066 self._lockref = weakref.ref(l)
3067 return l
3067 return l
3068
3068
3069 def wlock(self, wait=True):
3069 def wlock(self, wait=True):
3070 """Lock the non-store parts of the repository (everything under
3070 """Lock the non-store parts of the repository (everything under
3071 .hg except .hg/store) and return a weak reference to the lock.
3071 .hg except .hg/store) and return a weak reference to the lock.
3072
3072
3073 Use this before modifying files in .hg.
3073 Use this before modifying files in .hg.
3074
3074
3075 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3075 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3076 'wlock' first to avoid a dead-lock hazard."""
3076 'wlock' first to avoid a dead-lock hazard."""
3077 l = self._wlockref() if self._wlockref else None
3077 l = self._wlockref() if self._wlockref else None
3078 if l is not None and l.held:
3078 if l is not None and l.held:
3079 l.lock()
3079 l.lock()
3080 return l
3080 return l
3081
3081
3082 # We do not need to check for non-waiting lock acquisition. Such
3082 # We do not need to check for non-waiting lock acquisition. Such
3083 # acquisition would not cause dead-lock as they would just fail.
3083 # acquisition would not cause dead-lock as they would just fail.
3084 if wait and (
3084 if wait and (
3085 self.ui.configbool(b'devel', b'all-warnings')
3085 self.ui.configbool(b'devel', b'all-warnings')
3086 or self.ui.configbool(b'devel', b'check-locks')
3086 or self.ui.configbool(b'devel', b'check-locks')
3087 ):
3087 ):
3088 if self._currentlock(self._lockref) is not None:
3088 if self._currentlock(self._lockref) is not None:
3089 self.ui.develwarn(b'"wlock" acquired after "lock"')
3089 self.ui.develwarn(b'"wlock" acquired after "lock"')
3090
3090
3091 def unlock():
3091 def unlock():
3092 if self.dirstate.is_changing_any:
3092 if self.dirstate.is_changing_any:
3093 msg = b"wlock release in the middle of a changing parents"
3093 msg = b"wlock release in the middle of a changing parents"
3094 self.ui.develwarn(msg)
3094 self.ui.develwarn(msg)
3095 self.dirstate.invalidate()
3095 self.dirstate.invalidate()
3096 else:
3096 else:
3097 if self.dirstate._dirty:
3097 if self.dirstate._dirty:
3098 msg = b"dirty dirstate on wlock release"
3098 msg = b"dirty dirstate on wlock release"
3099 self.ui.develwarn(msg)
3099 self.ui.develwarn(msg)
3100 self.dirstate.write(None)
3100 self.dirstate.write(None)
3101
3101
3102 unfi = self.unfiltered()
3102 unfi = self.unfiltered()
3103 if 'dirstate' in unfi.__dict__:
3103 if 'dirstate' in unfi.__dict__:
3104 del unfi.__dict__['dirstate']
3104 del unfi.__dict__['dirstate']
3105
3105
3106 l = self._lock(
3106 l = self._lock(
3107 self.vfs,
3107 self.vfs,
3108 b"wlock",
3108 b"wlock",
3109 wait,
3109 wait,
3110 unlock,
3110 unlock,
3111 self.invalidatedirstate,
3111 self.invalidatedirstate,
3112 _(b'working directory of %s') % self.origroot,
3112 _(b'working directory of %s') % self.origroot,
3113 )
3113 )
3114 self._wlockref = weakref.ref(l)
3114 self._wlockref = weakref.ref(l)
3115 return l
3115 return l
3116
3116
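# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# The docstrings above require taking 'wlock' before 'lock'. Both are
# usable as context managers, as `commit` does below:
def locked_operation(repo):
    with repo.wlock(), repo.lock():
        pass  # safe to mutate both the working copy and the store here
# -------------------------------------------------------------------------
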
3117 def _currentlock(self, lockref):
3117 def _currentlock(self, lockref):
3118 """Returns the lock if it's held, or None if it's not."""
3118 """Returns the lock if it's held, or None if it's not."""
3119 if lockref is None:
3119 if lockref is None:
3120 return None
3120 return None
3121 l = lockref()
3121 l = lockref()
3122 if l is None or not l.held:
3122 if l is None or not l.held:
3123 return None
3123 return None
3124 return l
3124 return l
3125
3125
3126 def currentwlock(self):
3126 def currentwlock(self):
3127 """Returns the wlock if it's held, or None if it's not."""
3127 """Returns the wlock if it's held, or None if it's not."""
3128 return self._currentlock(self._wlockref)
3128 return self._currentlock(self._wlockref)
3129
3129
3130 def checkcommitpatterns(self, wctx, match, status, fail):
3130 def checkcommitpatterns(self, wctx, match, status, fail):
3131 """check for commit arguments that aren't committable"""
3131 """check for commit arguments that aren't committable"""
3132 if match.isexact() or match.prefix():
3132 if match.isexact() or match.prefix():
3133 matched = set(status.modified + status.added + status.removed)
3133 matched = set(status.modified + status.added + status.removed)
3134
3134
3135 for f in match.files():
3135 for f in match.files():
3136 f = self.dirstate.normalize(f)
3136 f = self.dirstate.normalize(f)
3137 if f == b'.' or f in matched or f in wctx.substate:
3137 if f == b'.' or f in matched or f in wctx.substate:
3138 continue
3138 continue
3139 if f in status.deleted:
3139 if f in status.deleted:
3140 fail(f, _(b'file not found!'))
3140 fail(f, _(b'file not found!'))
3141 # Is it a directory that exists or used to exist?
3141 # Is it a directory that exists or used to exist?
3142 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3142 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3143 d = f + b'/'
3143 d = f + b'/'
3144 for mf in matched:
3144 for mf in matched:
3145 if mf.startswith(d):
3145 if mf.startswith(d):
3146 break
3146 break
3147 else:
3147 else:
3148 fail(f, _(b"no match under directory!"))
3148 fail(f, _(b"no match under directory!"))
3149 elif f not in self.dirstate:
3149 elif f not in self.dirstate:
3150 fail(f, _(b"file not tracked!"))
3150 fail(f, _(b"file not tracked!"))
3151
3151
3152 @unfilteredmethod
3152 @unfilteredmethod
3153 def commit(
3153 def commit(
3154 self,
3154 self,
3155 text=b"",
3155 text=b"",
3156 user=None,
3156 user=None,
3157 date=None,
3157 date=None,
3158 match=None,
3158 match=None,
3159 force=False,
3159 force=False,
3160 editor=None,
3160 editor=None,
3161 extra=None,
3161 extra=None,
3162 ):
3162 ):
3163 """Add a new revision to current repository.
3163 """Add a new revision to current repository.
3164
3164
3165 Revision information is gathered from the working directory,
3165 Revision information is gathered from the working directory,
3166 match can be used to filter the committed files. If editor is
3166 match can be used to filter the committed files. If editor is
3167 supplied, it is called to get a commit message.
3167 supplied, it is called to get a commit message.
3168 """
3168 """
3169 if extra is None:
3169 if extra is None:
3170 extra = {}
3170 extra = {}
3171
3171
3172 def fail(f, msg):
3172 def fail(f, msg):
3173 raise error.InputError(b'%s: %s' % (f, msg))
3173 raise error.InputError(b'%s: %s' % (f, msg))
3174
3174
3175 if not match:
3175 if not match:
3176 match = matchmod.always()
3176 match = matchmod.always()
3177
3177
3178 if not force:
3178 if not force:
3179 match.bad = fail
3179 match.bad = fail
3180
3180
3181 # lock() for recent changelog (see issue4368)
3181 # lock() for recent changelog (see issue4368)
3182 with self.wlock(), self.lock():
3182 with self.wlock(), self.lock():
3183 wctx = self[None]
3183 wctx = self[None]
3184 merge = len(wctx.parents()) > 1
3184 merge = len(wctx.parents()) > 1
3185
3185
3186 if not force and merge and not match.always():
3186 if not force and merge and not match.always():
3187 raise error.Abort(
3187 raise error.Abort(
3188 _(
3188 _(
3189 b'cannot partially commit a merge '
3189 b'cannot partially commit a merge '
3190 b'(do not specify files or patterns)'
3190 b'(do not specify files or patterns)'
3191 )
3191 )
3192 )
3192 )
3193
3193
3194 status = self.status(match=match, clean=force)
3194 status = self.status(match=match, clean=force)
3195 if force:
3195 if force:
3196 status.modified.extend(
3196 status.modified.extend(
3197 status.clean
3197 status.clean
3198 ) # mq may commit clean files
3198 ) # mq may commit clean files
3199
3199
3200 # check subrepos
3200 # check subrepos
3201 subs, commitsubs, newstate = subrepoutil.precommit(
3201 subs, commitsubs, newstate = subrepoutil.precommit(
3202 self.ui, wctx, status, match, force=force
3202 self.ui, wctx, status, match, force=force
3203 )
3203 )
3204
3204
3205 # make sure all explicit patterns are matched
3205 # make sure all explicit patterns are matched
3206 if not force:
3206 if not force:
3207 self.checkcommitpatterns(wctx, match, status, fail)
3207 self.checkcommitpatterns(wctx, match, status, fail)
3208
3208
3209 cctx = context.workingcommitctx(
3209 cctx = context.workingcommitctx(
3210 self, status, text, user, date, extra
3210 self, status, text, user, date, extra
3211 )
3211 )
3212
3212
3213 ms = mergestatemod.mergestate.read(self)
3213 ms = mergestatemod.mergestate.read(self)
3214 mergeutil.checkunresolved(ms)
3214 mergeutil.checkunresolved(ms)
3215
3215
3216 # internal config: ui.allowemptycommit
3216 # internal config: ui.allowemptycommit
3217 if cctx.isempty() and not self.ui.configbool(
3217 if cctx.isempty() and not self.ui.configbool(
3218 b'ui', b'allowemptycommit'
3218 b'ui', b'allowemptycommit'
3219 ):
3219 ):
3220 self.ui.debug(b'nothing to commit, clearing merge state\n')
3220 self.ui.debug(b'nothing to commit, clearing merge state\n')
3221 ms.reset()
3221 ms.reset()
3222 return None
3222 return None
3223
3223
3224 if merge and cctx.deleted():
3224 if merge and cctx.deleted():
3225 raise error.Abort(_(b"cannot commit merge with missing files"))
3225 raise error.Abort(_(b"cannot commit merge with missing files"))
3226
3226
3227 if editor:
3227 if editor:
3228 cctx._text = editor(self, cctx, subs)
3228 cctx._text = editor(self, cctx, subs)
3229 edited = text != cctx._text
3229 edited = text != cctx._text
3230
3230
3231 # Save commit message in case this transaction gets rolled back
3231 # Save commit message in case this transaction gets rolled back
3232 # (e.g. by a pretxncommit hook). Leave the content alone on
3232 # (e.g. by a pretxncommit hook). Leave the content alone on
3233 # the assumption that the user will use the same editor again.
3233 # the assumption that the user will use the same editor again.
3234 msg_path = self.savecommitmessage(cctx._text)
3234 msg_path = self.savecommitmessage(cctx._text)
3235
3235
3236 # commit subs and write new state
3236 # commit subs and write new state
3237 if subs:
3237 if subs:
3238 uipathfn = scmutil.getuipathfn(self)
3238 uipathfn = scmutil.getuipathfn(self)
3239 for s in sorted(commitsubs):
3239 for s in sorted(commitsubs):
3240 sub = wctx.sub(s)
3240 sub = wctx.sub(s)
3241 self.ui.status(
3241 self.ui.status(
3242 _(b'committing subrepository %s\n')
3242 _(b'committing subrepository %s\n')
3243 % uipathfn(subrepoutil.subrelpath(sub))
3243 % uipathfn(subrepoutil.subrelpath(sub))
3244 )
3244 )
3245 sr = sub.commit(cctx._text, user, date)
3245 sr = sub.commit(cctx._text, user, date)
3246 newstate[s] = (newstate[s][0], sr)
3246 newstate[s] = (newstate[s][0], sr)
3247 subrepoutil.writestate(self, newstate)
3247 subrepoutil.writestate(self, newstate)
3248
3248
3249 p1, p2 = self.dirstate.parents()
3249 p1, p2 = self.dirstate.parents()
3250 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3250 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3251 try:
3251 try:
3252 self.hook(
3252 self.hook(
3253 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3253 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3254 )
3254 )
3255 with self.transaction(b'commit'):
3255 with self.transaction(b'commit'):
3256 ret = self.commitctx(cctx, True)
3256 ret = self.commitctx(cctx, True)
3257 # update bookmarks, dirstate and mergestate
3257 # update bookmarks, dirstate and mergestate
3258 bookmarks.update(self, [p1, p2], ret)
3258 bookmarks.update(self, [p1, p2], ret)
3259 cctx.markcommitted(ret)
3259 cctx.markcommitted(ret)
3260 ms.reset()
3260 ms.reset()
3261 except: # re-raises
3261 except: # re-raises
3262 if edited:
3262 if edited:
3263 self.ui.write(
3263 self.ui.write(
3264 _(b'note: commit message saved in %s\n') % msg_path
3264 _(b'note: commit message saved in %s\n') % msg_path
3265 )
3265 )
3266 self.ui.write(
3266 self.ui.write(
3267 _(
3267 _(
3268 b"note: use 'hg commit --logfile "
3268 b"note: use 'hg commit --logfile "
3269 b"%s --edit' to reuse it\n"
3269 b"%s --edit' to reuse it\n"
3270 )
3270 )
3271 % msg_path
3271 % msg_path
3272 )
3272 )
3273 raise
3273 raise
3274
3274
3275 def commithook(unused_success):
3275 def commithook(unused_success):
3276 # hack for command that use a temporary commit (eg: histedit)
3276 # hack for command that use a temporary commit (eg: histedit)
3277 # temporary commit got stripped before hook release
3277 # temporary commit got stripped before hook release
3278 if self.changelog.hasnode(ret):
3278 if self.changelog.hasnode(ret):
3279 self.hook(
3279 self.hook(
3280 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3280 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3281 )
3281 )
3282
3282
3283 self._afterlock(commithook)
3283 self._afterlock(commithook)
3284 return ret
3284 return ret
3285
3285
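# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# Committing a subset of files through this API; `files` holds
# repo-relative byte paths and `scmutil.matchfiles` builds the matcher:
from mercurial import scmutil

def commit_files(repo, files, message):
    m = scmutil.matchfiles(repo, files)
    # returns the new node, or None if there was nothing to commit
    return repo.commit(text=message, match=m)
# -------------------------------------------------------------------------
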
3286 @unfilteredmethod
3286 @unfilteredmethod
3287 def commitctx(self, ctx, error=False, origctx=None):
3287 def commitctx(self, ctx, error=False, origctx=None):
3288 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3288 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3289
3289
3290 @unfilteredmethod
3290 @unfilteredmethod
3291 def destroying(self):
3291 def destroying(self):
3292 """Inform the repository that nodes are about to be destroyed.
3292 """Inform the repository that nodes are about to be destroyed.
3293 Intended for use by strip and rollback, so there's a common
3293 Intended for use by strip and rollback, so there's a common
3294 place for anything that has to be done before destroying history.
3294 place for anything that has to be done before destroying history.
3295
3295
3296 This is mostly useful for saving state that is in memory and waiting
3296 This is mostly useful for saving state that is in memory and waiting
3297 to be flushed when the current lock is released. Because a call to
3297 to be flushed when the current lock is released. Because a call to
3298 destroyed is imminent, the repo will be invalidated causing those
3298 destroyed is imminent, the repo will be invalidated causing those
3299 changes to stay in memory (waiting for the next unlock), or vanish
3299 changes to stay in memory (waiting for the next unlock), or vanish
3300 completely.
3300 completely.
3301 """
3301 """
3302 # When using the same lock to commit and strip, the phasecache is left
3302 # When using the same lock to commit and strip, the phasecache is left
3303 # dirty after committing. Then when we strip, the repo is invalidated,
3303 # dirty after committing. Then when we strip, the repo is invalidated,
3304 # causing those changes to disappear.
3304 # causing those changes to disappear.
3305 if '_phasecache' in vars(self):
3305 if '_phasecache' in vars(self):
3306 self._phasecache.write()
3306 self._phasecache.write()
3307
3307
3308 @unfilteredmethod
3308 @unfilteredmethod
3309 def destroyed(self):
3309 def destroyed(self):
3310 """Inform the repository that nodes have been destroyed.
3310 """Inform the repository that nodes have been destroyed.
3311 Intended for use by strip and rollback, so there's a common
3311 Intended for use by strip and rollback, so there's a common
3312 place for anything that has to be done after destroying history.
3312 place for anything that has to be done after destroying history.
3313 """
3313 """
3314 # When one tries to:
3314 # When one tries to:
3315 # 1) destroy nodes thus calling this method (e.g. strip)
3315 # 1) destroy nodes thus calling this method (e.g. strip)
3316 # 2) use phasecache somewhere (e.g. commit)
3316 # 2) use phasecache somewhere (e.g. commit)
3317 #
3317 #
3318 # then 2) will fail because the phasecache contains nodes that were
3318 # then 2) will fail because the phasecache contains nodes that were
3319 # removed. We can either remove phasecache from the filecache,
3319 # removed. We can either remove phasecache from the filecache,
3320 # causing it to reload next time it is accessed, or simply filter
3320 # causing it to reload next time it is accessed, or simply filter
3321 # the removed nodes now and write the updated cache.
3321 # the removed nodes now and write the updated cache.
3322 self._phasecache.filterunknown(self)
3322 self._phasecache.filterunknown(self)
3323 self._phasecache.write()
3323 self._phasecache.write()
3324
3324
3325 # refresh all repository caches
3325 # refresh all repository caches
3326 self.updatecaches()
3326 self.updatecaches()
3327
3327
3328 # Ensure the persistent tag cache is updated. Doing it now
3328 # Ensure the persistent tag cache is updated. Doing it now
3329 # means that the tag cache only has to worry about destroyed
3329 # means that the tag cache only has to worry about destroyed
3330 # heads immediately after a strip/rollback. That in turn
3330 # heads immediately after a strip/rollback. That in turn
3331 # guarantees that "cachetip == currenttip" (comparing both rev
3331 # guarantees that "cachetip == currenttip" (comparing both rev
3332 # and node) always means no nodes have been added or destroyed.
3332 # and node) always means no nodes have been added or destroyed.
3333
3333
3334 # XXX this is suboptimal when qrefresh'ing: we strip the current
3334 # XXX this is suboptimal when qrefresh'ing: we strip the current
3335 # head, refresh the tag cache, then immediately add a new head.
3335 # head, refresh the tag cache, then immediately add a new head.
3336 # But I think doing it this way is necessary for the "instant
3336 # But I think doing it this way is necessary for the "instant
3337 # tag cache retrieval" case to work.
3337 # tag cache retrieval" case to work.
3338 self.invalidate()
3338 self.invalidate()
3339
3339
3340 def status(
3340 def status(
3341 self,
3341 self,
3342 node1=b'.',
3342 node1=b'.',
3343 node2=None,
3343 node2=None,
3344 match=None,
3344 match=None,
3345 ignored=False,
3345 ignored=False,
3346 clean=False,
3346 clean=False,
3347 unknown=False,
3347 unknown=False,
3348 listsubrepos=False,
3348 listsubrepos=False,
3349 ):
3349 ):
3350 '''a convenience method that calls node1.status(node2)'''
3350 '''a convenience method that calls node1.status(node2)'''
3351 return self[node1].status(
3351 return self[node1].status(
3352 node2, match, ignored, clean, unknown, listsubrepos
3352 node2, match, ignored, clean, unknown, listsubrepos
3353 )
3353 )
3354
3354
3355 def addpostdsstatus(self, ps):
3355 def addpostdsstatus(self, ps):
3356 """Add a callback to run within the wlock, at the point at which status
3356 """Add a callback to run within the wlock, at the point at which status
3357 fixups happen.
3357 fixups happen.
3358
3358
3359 On status completion, callback(wctx, status) will be called with the
3359 On status completion, callback(wctx, status) will be called with the
3360 wlock held, unless the dirstate has changed from underneath or the wlock
3360 wlock held, unless the dirstate has changed from underneath or the wlock
3361 couldn't be grabbed.
3361 couldn't be grabbed.
3362
3362
3363 Callbacks should not capture and use a cached copy of the dirstate --
3363 Callbacks should not capture and use a cached copy of the dirstate --
3364 it might change in the meanwhile. Instead, they should access the
3364 it might change in the meanwhile. Instead, they should access the
3365 dirstate via wctx.repo().dirstate.
3365 dirstate via wctx.repo().dirstate.
3366
3366
3367 This list is emptied out after each status run -- extensions should
3367 This list is emptied out after each status run -- extensions should
3368 make sure to add to this list each time dirstate.status is called.
3368 make sure to add to this list each time dirstate.status is called.
3369 Extensions should also make sure they don't call this for statuses
3369 Extensions should also make sure they don't call this for statuses
3370 that don't involve the dirstate.
3370 that don't involve the dirstate.
3371 """
3371 """
3372
3372
3373 # The list is located here for uniqueness reasons -- it is actually
3373 # The list is located here for uniqueness reasons -- it is actually
3374 # managed by the workingctx, but that isn't unique per-repo.
3374 # managed by the workingctx, but that isn't unique per-repo.
3375 self._postdsstatus.append(ps)
3375 self._postdsstatus.append(ps)
3376
3376
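# --- editor's example (illustrative sketch, not part of localrepo.py) ---
# A callback matching the contract documented above: invoked as
# callback(wctx, status) with the wlock held, accessing the dirstate only
# through wctx.repo():
def register_status_logger(repo):
    def callback(wctx, status):
        wctx.repo().ui.debug(b'%d file(s) modified\n' % len(status.modified))

    repo.addpostdsstatus(callback)
# -------------------------------------------------------------------------
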
3377 def postdsstatus(self):
3377 def postdsstatus(self):
3378 """Used by workingctx to get the list of post-dirstate-status hooks."""
3378 """Used by workingctx to get the list of post-dirstate-status hooks."""
3379 return self._postdsstatus
3379 return self._postdsstatus
3380
3380
3381 def clearpostdsstatus(self):
3381 def clearpostdsstatus(self):
3382 """Used by workingctx to clear post-dirstate-status hooks."""
3382 """Used by workingctx to clear post-dirstate-status hooks."""
3383 del self._postdsstatus[:]
3383 del self._postdsstatus[:]
3384
3384
3385 def heads(self, start=None):
3385 def heads(self, start=None):
3386 if start is None:
3386 if start is None:
3387 cl = self.changelog
3387 cl = self.changelog
3388 headrevs = reversed(cl.headrevs())
3388 headrevs = reversed(cl.headrevs())
3389 return [cl.node(rev) for rev in headrevs]
3389 return [cl.node(rev) for rev in headrevs]
3390
3390
3391 heads = self.changelog.heads(start)
3391 heads = self.changelog.heads(start)
3392 # sort the output in rev descending order
3392 # sort the output in rev descending order
3393 return sorted(heads, key=self.changelog.rev, reverse=True)
3393 return sorted(heads, key=self.changelog.rev, reverse=True)
3394
3394
3395 def branchheads(self, branch=None, start=None, closed=False):
3395 def branchheads(self, branch=None, start=None, closed=False):
3396 """return a (possibly filtered) list of heads for the given branch
3396 """return a (possibly filtered) list of heads for the given branch
3397
3397
3398 Heads are returned in topological order, from newest to oldest.
3398 Heads are returned in topological order, from newest to oldest.
3399 If branch is None, use the dirstate branch.
3399 If branch is None, use the dirstate branch.
3400 If start is not None, return only heads reachable from start.
3400 If start is not None, return only heads reachable from start.
3401 If closed is True, return heads that are marked as closed as well.
3401 If closed is True, return heads that are marked as closed as well.
3402 """
3402 """
3403 if branch is None:
3403 if branch is None:
3404 branch = self[None].branch()
3404 branch = self[None].branch()
3405 branches = self.branchmap()
3405 branches = self.branchmap()
3406 if not branches.hasbranch(branch):
3406 if not branches.hasbranch(branch):
3407 return []
3407 return []
3408 # the cache returns heads ordered lowest to highest
3408 # the cache returns heads ordered lowest to highest
3409 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3409 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3410 if start is not None:
3410 if start is not None:
3411 # filter out the heads that cannot be reached from startrev
3411 # filter out the heads that cannot be reached from startrev
3412 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3412 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3413 bheads = [h for h in bheads if h in fbheads]
3413 bheads = [h for h in bheads if h in fbheads]
3414 return bheads
3414 return bheads
3415
3415
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

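    # Illustrative note on ``between`` above: nodes are sampled at
    # exponentially growing distances from ``top`` -- ``i`` counts steps
    # along first parents, and a node is recorded whenever ``i`` hits ``f``,
    # which doubles at every hit, so a linear walk yields the nodes 1, 2, 4,
    # 8, ... steps away. This is the spacing the legacy ``between``
    # wire-protocol command expects.
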
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called before pushing
        changesets, each receiving a pushop with repo, remote, and outgoing
        attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

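    # Hedged usage sketch for ``pushkey`` (values are illustrative): built-in
    # pushkey namespaces include b'bookmarks' and b'phases', so moving a
    # bookmark over the wire ultimately boils down to something like
    #
    #   repo.pushkey(b'bookmarks', b'feature-x', hex(oldnode), hex(newnode))
    #
    # where an empty ``old`` value asks for the bookmark to be created.
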
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


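# Hedged sketch of how ``register_sidedata_computer`` above might be used by
# an extension; ``my_category``, ``my_key``, ``my_computer`` and ``my_flags``
# are hypothetical names, not part of Mercurial's API:
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,
#       b'my_category',
#       (b'my_key',),
#       my_computer,
#       my_flags,
#   )
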
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a


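# Hedged usage note: the transaction machinery passes the callable returned
# by ``aftertrans`` as the transaction's ``after`` callback, roughly
#
#   tr = transaction.transaction(..., after=aftertrans(renames), ...)
#
# so journal files get renamed to their undo counterparts (see ``undoname``
# below) once the transaction is closed.
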
def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


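# For example, undoname(b'.hg/store/journal') yields b'.hg/store/undo', and
# undoname(b'journal.bookmarks') yields b'undo.bookmarks'; names that do not
# start with b'journal' trip the assertion above.
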
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
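    # Note on the for/else below: Python runs the ``else`` branch only when
    # the loop completes without ``break``, i.e. when none of the configured
    # engines is both available and able to write a revlog header.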
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control over the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns the user about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


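# A minimal sketch of how an extension could claim a creation option it
# handles (b'myopt' is a hypothetical key, not a real option):
#
#   def _filtercreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myopt', None)
#       return unknown
#
#   extensions.wrapfunction(
#       localrepo, 'filterknowncreateopts', _filtercreateopts
#   )
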
def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)