localrepo: drop the CamelCase name for `localrepo.ilocalrepositorymain`...
Matt Harbison
r52973:513b4137 default
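This changeset drops a redundant CamelCase spelling of the
`localrepo.ilocalrepositorymain` interface name; the hunk below also drops
the now-unused `util as interfaceutil` import from `.interfaces`. As
context, a minimal sketch of the alias-removal pattern (hedged:
`ilocalrepositorymain` comes from the commit message; every other name and
member here is illustrative, not taken from the changeset):

    from typing import Protocol

    class ilocalrepositorymain(Protocol):
        """Canonical lowercase name that callers keep using."""

        def unfiltered(self):  # illustrative member only
            ...

    # The kind of deprecated CamelCase alias such a cleanup deletes:
    # ILocalRepositoryMain = ilocalrepositorymain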
@@ -1,4054 +1,4044 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import functools
import os
import random
import re
import sys
import time
import typing
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    policy,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .branching import (
    rev_cache as rev_branch_cache,
)

from .interfaces import (
    repository,
-    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

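# Editor's note (illustration, not part of this changeset): the decorators
# above are typically applied to properties of the localrepository class;
# a hypothetical member shown for shape only:
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The cached value is invalidated when the stat info of .hg/bookmarks
# changes, and lookups on a filtered view proxy to the unfiltered repo.
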
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

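# Editor's note (illustration, not part of this changeset): usage of the
# decorator above, with a hypothetical method name:
#
#     @unfilteredmethod
#     def destroyed(self):
#         # `self` is repo.unfiltered() here, even when the method is
#         # invoked on a filtered repoview
#         ...
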
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


class localcommandexecutor:  # (repository.ipeercommandexecutor)
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

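# Editor's note (illustration, not part of this changeset): the executor is
# reached through the peer API defined below; with a local peer every
# callcommand() returns an already-resolved future:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()
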
class localpeer(repository.peer):  # (repository.ipeercommands)
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
        super(localpeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs,
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs,
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if hasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


class locallegacypeer(localpeer):  # (repository.ipeerlegacycommands)
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

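# Editor's note (illustration, not part of this changeset): an extension
# registers a feature-setup function roughly like so:
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')  # hypothetical requirement
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions whose defining module belongs to a loaded extension are
# invoked; see gathersupportedrequirements() below.
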
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs

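# Editor's note (illustration, not part of this changeset): for a share
# created with `hg share --relative ../src dst`, `dst/.hg/sharedpath` holds
# a path relative to `dst/.hg/`, so _getsharedvfs() resolves roughly:
#
#     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')  # e.g. b'../../src/.hg'
#     sharedpath = util.normpath(hgvfs.join(sharedpath))    # absolute form
#
# For a non-relative share, the file already contains an absolute path.
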
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = scmutil.readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if the store requirement is not present; refer to
    # checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in scmutil.readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= scmutil.readrequires(storevfs, False)
    elif shared:
        sourcerequires = scmutil.readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

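# Editor's note (illustration, not part of this changeset): the class built
# above composes one base per REPO_INTERFACES entry, roughly equivalent to
#
#     cls = type('derivedrepo:/path<revlogv1,store>', (localrepository,), {})
#     repo = cls(baseui=baseui, ui=ui, origroot=path, ...)
#
# In practice callers open repositories via hg.repository(ui, path), which
# ends up in makelocalrepository().
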
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

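# Editor's note (illustration, not part of this changeset): for a share-safe
# share, loadhgrc() therefore layers configuration as
#
#     <source>/.hg/hgrc            (shared settings, read first)
#     <share>/.hg/hgrc             (per-share settings)
#     <share>/.hg/hgrc-not-shared  (per-share settings, read last)
#
# with later files overriding values from earlier ones.
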
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')

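# Editor's note (illustration, not part of this changeset): opening a repo
# whose .hg/requires lists `lfs` behaves as if the repo hgrc contained
#
#     [extensions]
#     lfs =
#
# with the config source recorded as `autoload`.
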
922 def gathersupportedrequirements(ui):
921 def gathersupportedrequirements(ui):
923 """Determine the complete set of recognized requirements."""
922 """Determine the complete set of recognized requirements."""
924 # Start with all requirements supported by this file.
923 # Start with all requirements supported by this file.
925 supported = set(localrepository._basesupported)
924 supported = set(localrepository._basesupported)
926
925
927 # Execute ``featuresetupfuncs`` entries if they belong to an extension
926 # Execute ``featuresetupfuncs`` entries if they belong to an extension
928 # relevant to this ui instance.
927 # relevant to this ui instance.
929 modules = {m.__name__ for n, m in extensions.extensions(ui)}
928 modules = {m.__name__ for n, m in extensions.extensions(ui)}
930
929
931 for fn in featuresetupfuncs:
930 for fn in featuresetupfuncs:
932 if fn.__module__ in modules:
931 if fn.__module__ in modules:
933 fn(ui, supported)
932 fn(ui, supported)
934
933
935 # Add derived requirements from registered compression engines.
934 # Add derived requirements from registered compression engines.
936 for name in util.compengines:
935 for name in util.compengines:
937 engine = util.compengines[name]
936 engine = util.compengines[name]
938 if engine.available() and engine.revlogheader():
937 if engine.available() and engine.revlogheader():
939 supported.add(b'exp-compression-%s' % name)
938 supported.add(b'exp-compression-%s' % name)
940 if engine.name() == b'zstd':
939 if engine.name() == b'zstd':
941 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
940 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
942
941
943 return supported
942 return supported
944
943
945
944
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
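
# Illustrative behavior (editor's sketch; ``exp-frobnicate`` is made up):
#
#   supported = gathersupportedrequirements(ui)
#   ensurerequirementsrecognized({b'revlogv1', b'exp-frobnicate'}, supported)
#
# raises RequirementError naming ``exp-frobnicate`` unless some enabled
# extension declared it, while an entry starting with a non-alphanumeric
# byte is reported as a corrupt .hg/requires file instead.
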
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )
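
# Hedged sketch of the monkeypatching the docstring invites; names below are
# hypothetical. Extensions typically use extensions.wrapfunction():
#
#   def checkcompat(orig, ui, requirements):
#       orig(ui, requirements)
#       if b'exp-myformat' in requirements and not myformat_available():
#           raise error.RepoError(_(b'repository needs the myformat backend'))
#
#   extensions.wrapfunction(localrepo, 'ensurerequirementscompatible',
#                           checkcompat)
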
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
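
# Requirement-to-store mapping implemented above, summarized (informative):
#
#   store + fncache  -> fncachestore (dotencode additionally encodes names
#                       starting with '.' or a space)
#   store only       -> encodedstore
#   neither          -> basicstore (ancient repos, no filename encoding)
#
# Modern repositories carry all three requirements, so fncachestore is by
# far the common case.
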
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
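
# For a typical modern repository (revlogv1 + generaldelta + sparserevlog,
# default config), the resulting options dict looks roughly like this
# (illustrative, not exhaustive):
#
#   {
#       b'revlogv1': True,
#       b'generaldelta': True,
#       b'flagprocessors': {},
#       b'feature-config': <revlog.FeatureConfig>,
#       b'data-config': <revlog.DataConfig>,
#       b'delta-config': <revlog.DeltaConfig>,
#       ...
#   }
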
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    feature_config = options[b'feature-config'] = revlog.FeatureConfig()
    data_config = options[b'data-config'] = revlog.DataConfig()
    delta_config = options[b'delta-config'] = revlog.DeltaConfig()

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        data_config.chunk_cache_size = chunkcachesize

    memory_profile = scmutil.get_resource_profile(ui, b'memory')
    if memory_profile >= scmutil.RESOURCE_MEDIUM:
        data_config.uncompressed_cache_count = 10_000
        data_config.uncompressed_cache_factor = 4
        if memory_profile >= scmutil.RESOURCE_HIGH:
            data_config.uncompressed_cache_factor = 10

    delta_config.delta_both_parents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    delta_config.candidate_group_chunk_size = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    delta_config.lazy_delta = lazydelta
    delta_config.lazy_delta_base = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        delta_config.max_deltachain_span = chainspan

    has_populate = util.has_mmap_populate()
    if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
        data_config.mmap_index_threshold = ui.configbytes(
            b'storage',
            b'revlog.mmap.index:size-threshold',
        )

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    data_config.with_sparse_read = withsparseread
    data_config.sr_density_threshold = srdensitythres
    data_config.sr_min_gap_size = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    delta_config.sparse_revlog = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True
        data_config.with_sparse_read = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        delta_config.max_chain_len = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            feature_config.compression_engine = r.split(b'-', 2)[2]

    zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
    if zlib_level is not None:
        if not (0 <= zlib_level <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % zlib_level)
        feature_config.compression_engine_options[b'zlib.level'] = zlib_level
    zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
    if zstd_level is not None:
        if not (0 <= zstd_level <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % zstd_level)
        feature_config.compression_engine_options[b'zstd.level'] = zstd_level

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        feature_config.enable_ellipsis = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
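
# Summary of the ``slow-path`` handling above (informative): when a repo
# requires `persistent-nodemap` or `dirstate-v2` but no fast (Rust/C)
# implementation is compiled in,
#
#   slow-path = allow  -> open silently
#   slow-path = warn   -> open, printing a warning (plus a hint unless quiet)
#   slow-path = abort  -> refuse to open the repository
#
# Any other configured value triggers a warning and falls back to the
# default from the config registry.
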
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


class revlogfilestorage:  # (repository.ilocalrepositoryfilestorage)
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


class revlognarrowfilestorage:  # (repository.ilocalrepositoryfilestorage)
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
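
# Note on ``try_split`` above (informative): while a transaction is open, or
# pending transaction data may exist for this root, a revlog can be midway
# through being split from inline to non-inline form, so both ``file()``
# implementations ask the filelog to also consider the transitional split
# index.
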
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

_localrepo_base_classes = object

if typing.TYPE_CHECKING:
    _localrepo_base_classes = [
        repository.ilocalrepositorymain,
        repository.ilocalrepositoryfilestorage,
    ]
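
# Hedged sketch of how ``makelocalrepository()`` (defined elsewhere in this
# module) consumes REPO_INTERFACES: each factory contributes one base class
# and the final repository type is assembled dynamically, roughly:
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       bases.append(fn()(requirements=requirements, features=features))
#   cls = type('derivedrepo', tuple(bases), {})
#
# The lambda indirection means that extensions which rebind ``makemain`` or
# ``makefilestorage`` at module level are picked up at call time.
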
class localrepository(_localrepo_base_classes):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if hasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)
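
    # Illustrative usage (editor's sketch) of the entry points named in the
    # docstring above, from highest to lowest level:
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #
    #   repo = localrepo.instance(ui, b'/path/to/repo', create=False)
    #   repo = localrepo.makelocalrepository(baseui, b'/path/to/repo')
    #
    # Only hg.repository() runs extension ``reposetup()`` hooks.
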
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not hasattr(repo, '_wlockref')
                or not hasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not hasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
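
    # The two ward builders above wrap the audit hook of self.vfs/self.svfs
    # when devel.check-locks or devel.all-warnings is set, so a write-mode
    # access without the matching lock emits a develwarn(). Illustrative
    # (hypothetical) trigger:
    #
    #   with repo.wlock():                   # wlock held, store lock not
    #       repo.svfs(b'phaseroots', b'w')   # -> 'write with no lock: ...'
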
    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
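
    # Walk illustration for _checknested() (informative): for a path ending
    # in sub/dir/file relative to the root, the loop probes b'sub/dir/file',
    # then b'sub/dir', then b'sub' against ctx.substate; on the first hit it
    # either accepts an exact match or delegates the remainder to the
    # subrepo's own checknested().
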
    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
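
    # Usage sketch (informative): filtering always restarts from the
    # unfiltered repo, so these two expressions yield equivalent "served"
    # views:
    #
    #   a = repo.filtered(b'served')
    #   b = repo.filtered(b'visible').filtered(b'served')
    #
    # because filtered() calls self.unfiltered() before building the repoview.
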
1705 @mixedrepostorecache(
1704 @mixedrepostorecache(
1706 (b'bookmarks', b'plain'),
1705 (b'bookmarks', b'plain'),
1707 (b'bookmarks.current', b'plain'),
1706 (b'bookmarks.current', b'plain'),
1708 (b'bookmarks', b''),
1707 (b'bookmarks', b''),
1709 (b'00changelog.i', b''),
1708 (b'00changelog.i', b''),
1710 )
1709 )
1711 def _bookmarks(self):
1710 def _bookmarks(self):
1712 # Since the multiple files involved in the transaction cannot be
1711 # Since the multiple files involved in the transaction cannot be
1713 # written atomically (with current repository format), there is a race
1712 # written atomically (with current repository format), there is a race
1714 # condition here.
1713 # condition here.
1715 #
1714 #
1716 # 1) changelog content A is read
1715 # 1) changelog content A is read
1717 # 2) outside transaction update changelog to content B
1716 # 2) outside transaction update changelog to content B
1718 # 3) outside transaction update bookmark file referring to content B
1717 # 3) outside transaction update bookmark file referring to content B
1719 # 4) bookmarks file content is read and filtered against changelog-A
1718 # 4) bookmarks file content is read and filtered against changelog-A
1720 #
1719 #
1721 # When this happens, bookmarks against nodes missing from A are dropped.
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. This
        # invalidation is "light": the `filecache` mechanism keeps the data in
        # memory and will reuse it if the underlying files did not change. Not
        # parsing the same data multiple times helps performance.
        #
        # Unfortunately in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestats for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestats for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cachestat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

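    # A minimal illustration (hypothetical bookmark name) of how the
    # `_bookmarks` cache computed above is consumed; the store behaves like a
    # mapping from bookmark name to node:
    #
    #     node = repo._bookmarks.get(b'my-book')
    #     active = repo._bookmarks.active  # active bookmark name, or None
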
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown it might just be because the changelog
            # in memory is lagging behind the dirstate in memory. So try to
            # refresh the changelog first.
            #
            # We only do so if we don't hold the lock. If we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

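    # A minimal sketch (hypothetical patterns) of how callers combine their
    # own matcher with the narrowspec; only files matching both survive the
    # intersection:
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     narrowed = repo.narrowmatch(m)
    #     narrowed(b'src/a.py')  # True only if also inside the narrowspec
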
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

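    # An illustrative shape of the quick-access dict (node values are made
    # up): with the working copy parent at revision 5, lookups such as
    # repo[b'.'] or repo[5] resolve without touching the revset machinery:
    #
    #     {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #      5: (5, node5), node5: (5, node5), b'.': (5, node5)}
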
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

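    # A short, illustrative summary of the changeid forms accepted above
    # (node values are hypothetical):
    #
    #     repo[None]        # working directory context
    #     repo[b'tip']      # symbolic name
    #     repo[5]           # integer revision
    #     repo[b'.']        # first parent of the working directory
    #     repo[binnode]     # binary node (nodelen bytes)
    #     repo[b'a1b2...']  # full hex node string (2 * nodelen chars)
    #     repo[0:3]         # slice -> list of changectx
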
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

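    # An illustrative call (revset and values are made up); the %-formatting
    # escapes the arguments so user-supplied values cannot inject revset
    # syntax:
    #
    #     for rev in repo.revs(b'ancestors(%d) and branch(%s)', 42, b'default'):
    #         ...
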
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

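    # An illustrative shape of the pair returned by _findtags() (nodes are
    # hypothetical):
    #
    #     tags     = {b'tip': node_a, b'v1.0': node_b, b'wip': node_c}
    #     tagtypes = {b'v1.0': b'global', b'wip': b'local'}
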
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

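    # An illustrative shape of the branchmap (heads are hypothetical nodes,
    # ordered by increasing revision number within each branch):
    #
    #     {b'default': [head1, head2], b'stable': [head3]}
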
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            unfi = self.unfiltered()
            self._revbranchcache = rev_branch_cache.revbranchcache(unfi)
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

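    # An illustrative call to known() (nodes are hypothetical): one known
    # node, one missing node, and one filtered (e.g. hidden) node:
    #
    #     repo.known([known_node, missing_node, hidden_node])
    #     -> [True, False, False]
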
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

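    # An illustrative hgrc snippet (hypothetical commands) driving the
    # filters loaded above; wread() pipes matching files through the [encode]
    # filters and wwrite() through the [decode] filters:
    #
    #     [encode]
    #     *.txt = dos2unix
    #
    #     [decode]
    #     *.txt = unix2dos
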
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

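    # Illustrative calls (paths and contents are made up), showing how the
    # flags select symlink ('l') or executable ('x') handling:
    #
    #     repo.wwrite(b'a.txt', b'data', b'')         # plain file
    #     repo.wwrite(b'link', b'a.txt', b'l')        # symlink to a.txt
    #     repo.wwrite(b'run.sh', b'#!/bin/sh', b'x')  # executable bit set
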
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
2510 tracktags = lambda x: None
2509 tracktags = lambda x: None
2511 # experimental config: experimental.hook-track-tags
2510 # experimental config: experimental.hook-track-tags
2512 shouldtracktags = self.ui.configbool(
2511 shouldtracktags = self.ui.configbool(
2513 b'experimental', b'hook-track-tags'
2512 b'experimental', b'hook-track-tags'
2514 )
2513 )
2515 if desc != b'strip' and shouldtracktags:
2514 if desc != b'strip' and shouldtracktags:
2516 oldheads = self.changelog.headrevs()
2515 oldheads = self.changelog.headrevs()
2517
2516
2518 def tracktags(tr2):
2517 def tracktags(tr2):
2519 repo = reporef()
2518 repo = reporef()
2520 assert repo is not None # help pytype
2519 assert repo is not None # help pytype
2521 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2520 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2522 newheads = repo.changelog.headrevs()
2521 newheads = repo.changelog.headrevs()
2523 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2522 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2524 # notes: we compare lists here.
2523 # notes: we compare lists here.
2525 # As we do it only once buiding set would not be cheaper
2524 # As we do it only once buiding set would not be cheaper
2526 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2525 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2527 if changes:
2526 if changes:
2528 tr2.hookargs[b'tag_moved'] = b'1'
2527 tr2.hookargs[b'tag_moved'] = b'1'
2529 with repo.vfs(
2528 with repo.vfs(
2530 b'changes/tags.changes', b'w', atomictemp=True
2529 b'changes/tags.changes', b'w', atomictemp=True
2531 ) as changesfile:
2530 ) as changesfile:
2532 # note: we do not register the file to the transaction
2531 # note: we do not register the file to the transaction
2533 # because we needs it to still exist on the transaction
2532 # because we needs it to still exist on the transaction
2534 # is close (for txnclose hooks)
2533 # is close (for txnclose hooks)
2535 tagsmod.writediff(changesfile, changes)
                    tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

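        # A minimal illustration (hypothetical, not part of this file) of the
        # kind of hgrc hook the validator above ends up firing:
        #
        #   [hooks]
        #   pretxnclose.check-heads = python:myext.checkheads
        #
        # `myext.checkheads` is an assumed extension function; a failing
        # pretxnclose hook aborts the transaction before it is closed.
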
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this must be invoked explicitly here, because in-memory
                # changes are not written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) was not
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need to have viable parents
        # at the end of the operation. So back up viable parents at the time
        # of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are
                    # append-only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr

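    # A minimal usage sketch (hypothetical, not part of this file): callers
    # typically pair the locks with the transaction built above, e.g.
    #
    #   with repo.wlock(), repo.lock(), repo.transaction(b'my-change') as tr:
    #       ...  # write to the store; an escaping exception aborts the txn
    #
    # as commit() below does with its b'commit' transaction.
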
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

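    # For illustration: with 1024 changesets and a transaction named
    # b'commit', `_writejournal` above writes b"1024\ncommit\n" to
    # journal.desc -- the same two fields `_rollback` below parses back
    # out of undo.desc.
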
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone, and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                self.ui.debug(b'updating the branch cache\n')
                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
                served = self.filtered(b'served')
                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
                served_hidden = self.filtered(b'served.hidden')
                self._branchcaches.update_disk(
                    served_hidden, detect_pure_topo=dpt
                )

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.warm_cache(self)

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches

            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)

        # flush all possibly delayed write.
        self._branchcaches.write_dirty(self)

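    # A minimal sketch (hypothetical call site) of warming a single cache
    # family through the method above, using constants from
    # `interfaces.repository`:
    #
    #   repo.updatecaches(caches={repository.CACHE_TAGS_DEFAULT})
    #
    # which takes only the `self.tags()` branch above (the final
    # `write_dirty` flush still runs unconditionally).
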
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr, so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
        sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
        if not sync_file:
            sync_file = None

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
            devel_wait_sync_file=sync_file,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

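    # For illustration (hypothetical callback): code that must wait until
    # every lock is released can use the helper above, e.g.
    #
    #   def notify(success):
    #       if success:
    #           repo.ui.status(b'all locks released\n')
    #
    #   repo._afterlock(notify)
    #
    # If no lock is currently held, the callback runs immediately with True.
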
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        self.hook(b'prelock', throw=True)
        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        self.hook(b'prewlock', throw=True)
        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

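    # A minimal sketch of the ordering rule from the two docstrings above,
    # mirroring how commit() below takes both locks:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # safe to touch both .hg and .hg/store here
    #
    # Taking them the other way round triggers the develwarn above and risks
    # deadlocking against well-behaved callers.
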
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

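    # A minimal usage sketch (hypothetical values) of the commit entry point
    # above:
    #
    #   node = repo.commit(
    #       text=b'fix parsing bug',
    #       user=b'Jane Doe <jane@example.com>',
    #   )
    #   if node is None:
    #       repo.ui.status(b'nothing to commit\n')
    #
    # It returns the new changeset node, or None for an empty commit when
    # ui.allowemptycommit is unset.
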
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

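    # For illustration (hypothetical extension code) of the callback
    # contract described above:
    #
    #   def fixup(wctx, status):
    #       wctx.repo().ui.debug(b'%d modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(fixup)
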
3437 def postdsstatus(self):
3436 def postdsstatus(self):
3438 """Used by workingctx to get the list of post-dirstate-status hooks."""
3437 """Used by workingctx to get the list of post-dirstate-status hooks."""
3439 return self._postdsstatus
3438 return self._postdsstatus
3440
3439
3441 def clearpostdsstatus(self):
3440 def clearpostdsstatus(self):
3442 """Used by workingctx to clear post-dirstate-status hooks."""
3441 """Used by workingctx to clear post-dirstate-status hooks."""
3443 del self._postdsstatus[:]
3442 del self._postdsstatus[:]
3444
3443
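A sketch of the callback protocol described above, with invented names; a
real extension would arrange for the registration to be repeated before every
dirstate status run (for example by wrapping the code path that calls
dirstate.status):

    def _fixup(wctx, status):
        # Runs under wlock at status-fixup time; always go through
        # wctx.repo().dirstate instead of a dirstate captured earlier.
        wctx.repo().ui.debug(b'post-status: %d modified\n'
                             % len(status.modified))

    repo.addpostdsstatus(_fixup)
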
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        # for each requested node, follow first parents back until a merge
        # or a root is hit, recording (start, end-of-run, p1, p2)
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, sample the first-parent chain at
        # exponentially growing distances (1, 2, 4, 8, ...) from top
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

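The doubling of ``f`` means each returned list holds ancestors of ``top`` at
distances 1, 2, 4, 8, ... from it, so samples stay logarithmic in the length
of the chain. The same walk on a toy chain of integers, where the "parent" of
``n`` is ``n - 1`` (a standalone sketch, not part of this module):

    def sample_chain(top: int, bottom: int) -> list[int]:
        picked, n, i, f = [], top, 0, 1
        while n != bottom and n != 0:  # 0 plays the role of nullid
            p = n - 1  # the single "parent"
            if i == f:
                picked.append(n)
                f *= 2
            n = p
            i += 1
        return picked

    assert sample_chain(100, 80) == [99, 98, 96, 92, 84]
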
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance of functions to call before pushing
        changesets; each hook receives the pushop, which carries the repo,
        remote, and outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

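Because ``prepushkey`` runs with ``throw=True``, a failing hook aborts the
operation before ``pushkey.push`` is reached. A minimal in-process hook that
vetoes bookmark updates (the hook name and hgrc wiring are illustrative):

    # hgrc: [hooks] prepushkey.no-bookmarks = python:myhooks.rejectbookmarks
    def rejectbookmarks(ui, repo, namespace=None, key=None, **kwargs):
        if namespace == b'bookmarks':
            ui.warn(b'refusing bookmark update: %s\n' % (key or b'?'))
            return True  # a truthy return marks the hook as failed
        return False
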
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)

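A sketch of a registration as an extension might perform it; the category and
payload are invented, and the computer is assumed to follow the
``(repo, store, rev, sidedata) -> (sidedata, (flags_to_add, flags_to_remove))``
convention used by the built-in sidedata helpers:

    def compute_example(repo, store, rev, sidedata):
        # hypothetical computer: attach a constant payload to every revision
        return {sidedatamod.SD_TEST1: b'example-payload'}, (0, 0)

    repo.register_sidedata_computer(
        revlogconst.KIND_CHANGELOG,  # one of revlogconst.ALL_KINDS
        b'example',                  # category
        (sidedatamod.SD_TEST1,),     # sidedata keys this computer produces
        compute_example,
        0,                           # revlog flags to set on touched revisions
    )
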

localrepository = interfaceutil.implementer(repository.ilocalrepositorymain)(
    LocalRepository
)

if typing.TYPE_CHECKING:
    # Help pytype by hiding the interface stuff that confuses it.
    localrepository = LocalRepository


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))

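Concretely, the helper swaps the leading ``journal`` for ``undo`` in the
basename:

    assert undoname(b'.hg/store/journal') == b'.hg/store/undo'
    assert undoname(b'.hg/store/journal.bookmarks') == b'.hg/store/undo.bookmarks'
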
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repoΒΉ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements

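Since the docstring invites wrapping, a minimal sketch of an extension adding
its own requirement; the requirement string is invented:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        requirements = orig(ui, createopts)
        requirements.add(b'exp-myfeature')  # hypothetical requirement
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'newreporequirements', _newreporequirements
        )
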
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

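A typical call, sketched; the path and options are illustrative, and
defaultcreateopts() fills in the backend when no requirements are passed:

    createrepository(
        ui,
        b'/tmp/newrepo',
        createopts={b'lfs': True, b'narrowfiles': False},
    )
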
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
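After the class swap, the instance keeps only a functioning close(); any
other attribute access raises, for example (sketch):

    poisonrepository(repo)
    repo.close()    # still permitted, now a no-op
    repo.changelog  # raises error.ProgrammingError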