typing: add basic type hints to localrepo.py...
Matt Harbison
r50466:8fa3f7c3 default
@@ -1,3954 +1,3971 @@
 # localrepo.py - read/write repository class for mercurial
 # coding: utf-8
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import functools
 import os
 import random
 import sys
 import time
 import weakref

 from concurrent import futures
+from typing import (
+    Optional,
+)
+
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     sha1nodeconstants,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revlog,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
     wireprototypes,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     hashutil,
     procutil,
     stringutil,
     urlutil,
 )

 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
     sidedata as sidedatamod,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()


 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered"""

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)


 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""

     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)


 class storecache(_basefilecache):
     """filecache for files in the store"""

     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))

     def join(self, obj, fname):
         return obj.sjoin(fname)


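# Editor's illustration (hypothetical fragment, not part of this changeset):
# the decorators above behave like cached properties whose value is
# invalidated when the named file changes on disk. localrepository uses them
# along these lines for bookmarks:
class _exampleusage:
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)
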
 class changelogcache(storecache):
     """filecache for the changelog"""

     def __init__(self):
         super(changelogcache, self).__init__()
         _cachedfiles.add((b'00changelog.i', b''))
         _cachedfiles.add((b'00changelog.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00changelog.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00changelog.n'))
         return paths


 class manifestlogcache(storecache):
     """filecache for the manifestlog"""

     def __init__(self):
         super(manifestlogcache, self).__init__()
         _cachedfiles.add((b'00manifest.i', b''))
         _cachedfiles.add((b'00manifest.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00manifest.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00manifest.n'))
         return paths


 class mixedrepostorecache(_basefilecache):
     """filecache for a mix of files in .hg/store and outside"""

     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)


 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True


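# Editor's illustration (hypothetical helper, not part of this changeset):
# isfilecached() peeks at a filecache-ed property without forcing it to be
# loaded; the name is assumed to be the bytes key under which the decorated
# property is registered.
def _example_peek(repo):
    bmstore, cached = isfilecached(repo, b'_bookmarks')
    if cached:
        return bmstore  # already materialized; no file I/O triggered
    return None
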
 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)


 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())


 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""

     @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)

     return wrapper


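# Editor's illustration (hypothetical fragment, not part of this changeset):
# a method decorated with @unfilteredmethod always executes against the
# unfiltered repository, regardless of which filtered view it is called on.
class _examplerepo:
    @unfilteredmethod
    def destroyed(self):
        # ``self`` is guaranteed to be the unfiltered repo here.
        pass
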
 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})


 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor:
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )

         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True


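# Editor's illustration (hypothetical helper, not part of this changeset):
# driving a command through the executor; for a local peer the future is
# already resolved by the time callcommand() returns.
def _example_heads(repo):
    with repo.peer().commandexecutor() as executor:
        f = executor.callcommand(b'heads', {})
    return f.result()
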
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()

         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui

         if repo._wanted_sidedata:
             formatted = bundle2.format_remote_wanted_sidedata(repo)
             caps.add(b'exp-wanted-sidedata=' + formatted)

         self._caps = repo._restrictcapabilities(caps)

     # Begin of _basepeer interface.

     def url(self):
         return self._repo.url()

     def local(self):
         return self._repo

     def peer(self):
         return self

     def canpush(self):
         return True

     def close(self):
         self._repo.close()

     # End of _basepeer interface.

     # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

     def capabilities(self):
         return self._caps

     def clonebundles(self):
         return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )

     def getbundle(
         self,
         source,
         heads=None,
         common=None,
         bundlecaps=None,
         remote_sidedata=None,
         **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             remote_sidedata=remote_sidedata,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def lookup(self, key):
         return self._repo.lookup(key)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.


 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     """peer extension which implements legacy methods too; used for tests with
     restricted capabilities"""

     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)

     # Begin of baselegacywirecommands interface.

     def between(self, pairs):
         return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     # End of baselegacywirecommands interface.


 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()


 def _getsharedvfs(hgvfs, requirements):
     """returns the vfs object pointing to the root of the shared source
     repo for a shared repository

     hgvfs is vfs pointing at .hg/ of current repo (shared one)
     requirements is a set of requirements of current repo (shared one)
     """
     # The ``shared`` or ``relshared`` requirements indicate the
     # store lives in the path contained in the ``.hg/sharedpath`` file.
     # This is an absolute path for ``shared`` and relative to
     # ``.hg/`` for ``relshared``.
     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
     if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
         sharedpath = util.normpath(hgvfs.join(sharedpath))

     sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

     if not sharedvfs.exists():
         raise error.RepoError(
             _(b'.hg/sharedpath points to nonexistent directory %s')
             % sharedvfs.base
         )
     return sharedvfs


 def _readrequires(vfs, allowmissing):
     """reads the requires file present at the root of this vfs
     and returns a set of requirements

     If allowmissing is True, we suppress FileNotFoundError if raised"""
     # requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     read = vfs.tryread if allowmissing else vfs.read
     return set(read(b'requires').splitlines())


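# Editor's illustration (hypothetical helper, not part of this changeset):
# how the helpers above combine when computing a repo's effective
# requirements, mirroring the logic in makelocalrepository() below.
def _example_requirements(hgvfs, storevfs):
    # tolerate a missing file: pre-0.9.2 repos have no .hg/requires
    reqs = _readrequires(hgvfs, True)
    if requirementsmod.SHARESAFE_REQUIREMENT in reqs:
        # share-safe repos keep store requirements in .hg/store/requires
        reqs |= _readrequires(storevfs, False)
    return reqs
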
-def makelocalrepository(baseui, path, intents=None):
+def makelocalrepository(baseui, path: bytes, intents=None):
     """Create a local repository object.

     Given arguments needed to construct a local repository, this function
     performs various early repository loading functionality (such as
     reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
     the repository can be opened, derives a type suitable for representing
     that repository, and returns an instance of it.

     The returned object conforms to the ``repository.completelocalrepository``
     interface.

     The repository type is derived by calling a series of factory functions
     for each aspect/interface of the final repository. These are defined by
     ``REPO_INTERFACES``.

     Each factory function is called to produce a type implementing a specific
     interface. The cumulative list of returned types will be combined into a
     new type and that type will be instantiated to represent the local
     repository.

     The factory functions each receive various state that may be consulted
     as part of deriving a type.

     Extensions should wrap these factory functions to customize repository type
     creation. Note that an extension's wrapped function may be called even if
     that extension is not loaded for the repo being constructed. Extensions
     should check if their ``__name__`` appears in the
     ``extensionmodulenames`` set passed to the factory function and no-op if
     not.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy

     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
     # Whether this repository is a shared one or not
     shared = False
     # If this repository is shared, vfs pointing to shared repo
     sharedvfs = None

     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except FileNotFoundError:
             pass
         except ValueError as e:
             # Can be raised on Python 3.8 when path is invalid.
             raise error.Abort(
                 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
             )

         raise error.RepoError(_(b'repository %s not found') % path)

     requirements = _readrequires(hgvfs, True)
     shared = (
         requirementsmod.SHARED_REQUIREMENT in requirements
         or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
     )
     storevfs = None
     if shared:
         # This is a shared repo
         sharedvfs = _getsharedvfs(hgvfs, requirements)
         storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
     else:
         storevfs = vfsmod.vfs(hgvfs.join(b'store'))

     # if .hg/requires contains the sharesafe requirement, it means
     # there exists a `.hg/store/requires` too and we should read it
     # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
     # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
     # is not present, refer to checkrequirementscompat() for that
     #
     # However, if SHARESAFE_REQUIREMENT is not present, it means that the
     # repository was shared the old way. We check the share source .hg/requires
     # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
     # to be reshared
     hint = _(b"see `hg help config.format.use-share-safe` for more information")
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

         if (
             shared
             and requirementsmod.SHARESAFE_REQUIREMENT
             not in _readrequires(sharedvfs, True)
         ):
             mismatch_warn = ui.configbool(
                 b'share', b'safe-mismatch.source-not-safe.warn'
             )
             mismatch_config = ui.config(
                 b'share', b'safe-mismatch.source-not-safe'
             )
             mismatch_verbose_upgrade = ui.configbool(
                 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
             )
             if mismatch_config in (
                 b'downgrade-allow',
                 b'allow',
                 b'downgrade-abort',
             ):
                 # prevent cyclic import localrepo -> upgrade -> localrepo
                 from . import upgrade

                 upgrade.downgrade_share_to_non_safe(
                     ui,
                     hgvfs,
                     sharedvfs,
                     requirements,
                     mismatch_config,
                     mismatch_warn,
                     mismatch_verbose_upgrade,
                 )
             elif mismatch_config == b'abort':
                 raise error.Abort(
                     _(b"share source does not support share-safe requirement"),
                     hint=hint,
                 )
             else:
                 raise error.Abort(
                     _(
                         b"share-safe mismatch with source.\nUnrecognized"
                         b" value '%s' of `share.safe-mismatch.source-not-safe`"
                         b" set."
                     )
                     % mismatch_config,
                     hint=hint,
                 )
         else:
             requirements |= _readrequires(storevfs, False)
     elif shared:
         sourcerequires = _readrequires(sharedvfs, False)
         if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
             mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
             mismatch_warn = ui.configbool(
                 b'share', b'safe-mismatch.source-safe.warn'
             )
             mismatch_verbose_upgrade = ui.configbool(
                 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
             )
             if mismatch_config in (
                 b'upgrade-allow',
                 b'allow',
                 b'upgrade-abort',
             ):
                 # prevent cyclic import localrepo -> upgrade -> localrepo
                 from . import upgrade

                 upgrade.upgrade_share_to_safe(
                     ui,
                     hgvfs,
                     storevfs,
                     requirements,
                     mismatch_config,
                     mismatch_warn,
                     mismatch_verbose_upgrade,
                 )
             elif mismatch_config == b'abort':
                 raise error.Abort(
                     _(
                         b'version mismatch: source uses share-safe'
                         b' functionality while the current share does not'
                     ),
                     hint=hint,
                 )
             else:
                 raise error.Abort(
                     _(
                         b"share-safe mismatch with source.\nUnrecognized"
                         b" value '%s' of `share.safe-mismatch.source-safe` set."
                     )
                     % mismatch_config,
                     hint=hint,
                 )

     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
         afterhgrcload(ui, wdirvfs, hgvfs, requirements)
     extensions.loadall(ui)
     extensions.populateui(ui)

     # Set of module names of extensions loaded for this repository.
     extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

     supportedrequirements = gathersupportedrequirements(ui)

     # We first validate the requirements are known.
     ensurerequirementsrecognized(requirements, supportedrequirements)

     # Then we validate that the known set is reasonable to use together.
     ensurerequirementscompatible(ui, requirements)

     # TODO there are unhandled edge cases related to opening repositories with
     # shared storage. If storage is shared, we should also test for requirements
     # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
     # that repo, as that repo may load extensions needed to open it. This is a
     # bit complicated because we don't want the other hgrc to overwrite settings
     # in this hgrc.
     #
     # This bug is somewhat mitigated by the fact that we copy the .hg/requires
     # file when sharing repos. But if a requirement is added after the share is
     # performed, thereby introducing a new requirement for the opener, we may
     # not see that and could encounter a run-time error interacting with
     # that shared store since it has an unknown-to-us requirement.

     # At this point, we know we should be capable of opening the repository.
     # Now get on with doing that.

     features = set()

     # The "store" part of the repository holds versioned data. How it is
     # accessed is determined by various requirements. If `shared` or
     # `relshared` requirements are present, this indicates current repository
     # is a share and store exists in path mentioned in `.hg/sharedpath`
     if shared:
         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
         features.add(repository.REPO_FEATURE_SHARED_STORAGE)
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
     wcachepath = hgvfs.join(b'wcache')

     # The store has changed over time and the exact layout is dictated by
     # requirements. The store interface abstracts differences across all
     # of them.
     store = makestore(
         requirements,
         storebasepath,
         lambda base: vfsmod.vfs(base, cacheaudited=True),
     )
     hgvfs.createmode = store.createmode

     storevfs = store.vfs
     storevfs.options = resolvestorevfsoptions(ui, requirements, features)

     if (
         requirementsmod.REVLOGV2_REQUIREMENT in requirements
         or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
     ):
         features.add(repository.REPO_FEATURE_SIDE_DATA)
         # the revlogv2 docket introduced race condition that we need to fix
         features.discard(repository.REPO_FEATURE_STREAM_CLONE)

     # The cache vfs is used to manage cache files.
     cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
     cachevfs.createmode = store.createmode
     # The cache vfs is used to manage cache files related to the working copy
     wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
     wcachevfs.createmode = store.createmode

     # Now resolve the type for the repository object. We do this by repeatedly
     # calling a factory function to produce types for specific aspects of the
     # repo's operation. The aggregate returned types are used as base classes
     # for a dynamically-derived type, which will represent our new repository.

     bases = []
     extrastate = {}

     for iface, fn in REPO_INTERFACES:
         # We pass all potentially useful state to give extensions tons of
         # flexibility.
         typ = fn()(
             ui=ui,
             intents=intents,
             requirements=requirements,
             features=features,
             wdirvfs=wdirvfs,
             hgvfs=hgvfs,
             store=store,
             storevfs=storevfs,
             storeoptions=storevfs.options,
             cachevfs=cachevfs,
             wcachevfs=wcachevfs,
             extensionmodulenames=extensionmodulenames,
             extrastate=extrastate,
             baseclasses=bases,
         )

         if not isinstance(typ, type):
             raise error.ProgrammingError(
                 b'unable to construct type for %s' % iface
             )

         bases.append(typ)

     # type() allows you to use characters in type names that wouldn't be
     # recognized as Python symbols in source code. We abuse that to add
     # rich information about our constructed repo.
     name = pycompat.sysstr(
         b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
     )

     cls = type(name, tuple(bases), {})

     return cls(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
         requirements=requirements,
         supportedrequirements=supportedrequirements,
         sharedpath=storebasepath,
         store=store,
         cachevfs=cachevfs,
         wcachevfs=wcachevfs,
         features=features,
         intents=intents,
     )


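# Editor's sketch (not part of this changeset): an extension customizing
# repository construction by wrapping one of the REPO_INTERFACES factory
# functions, as the docstring above suggests. ``makefilestorage`` is one such
# factory defined later in this file; the added feature name is hypothetical.
def _example_extsetup(ui):
    from mercurial import extensions, localrepo

    def wrapfilestorage(orig, requirements, features, **kwargs):
        features.add(b'example-feature')  # hypothetical feature flag
        return orig(requirements, features, **kwargs)

    extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)
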
-def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
+def loadhgrc(
+    ui,
+    wdirvfs: vfsmod.vfs,
+    hgvfs: vfsmod.vfs,
+    requirements,
+    sharedvfs: Optional[vfsmod.vfs] = None,
+):
     """Load hgrc files/content into a ui instance.

     This is called during repository opening to load any additional
     config files or settings relevant to the current repository.

     Returns a bool indicating whether any additional configs were loaded.

     Extensions should monkeypatch this function to modify how per-repo
     configs are loaded. For example, an extension may wish to pull in
     configs from alternate files or sources.

     sharedvfs is vfs object pointing to source repo if the current one is a
     shared one
     """
     if not rcutil.use_repo_hgrc():
         return False

     ret = False
     # first load config from shared source if we have to
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
         try:
             ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
             ret = True
         except IOError:
             pass

     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
         ret = True
     except IOError:
         pass

     try:
         ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
         ret = True
     except IOError:
         pass

     return ret


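# Editor's sketch (not part of this changeset): the docstring above invites
# extensions to monkeypatch loadhgrc. A wrapper layering in one extra,
# hypothetical config file could look like this in an extension module:
def _example_uisetup(ui):
    from mercurial import extensions, localrepo

    def _loadextra(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # ``hgrc-extra`` is a hypothetical file name
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    extensions.wrapfunction(localrepo, 'loadhgrc', _loadextra)
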
890 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
900 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
891 """Perform additional actions after .hg/hgrc is loaded.
901 """Perform additional actions after .hg/hgrc is loaded.
892
902
893 This function is called during repository loading immediately after
903 This function is called during repository loading immediately after
894 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
904 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
895
905
896 The function can be used to validate configs, automatically add
906 The function can be used to validate configs, automatically add
897 options (including extensions) based on requirements, etc.
907 options (including extensions) based on requirements, etc.
898 """
908 """
899
909
900 # Map of requirements to list of extensions to load automatically when
910 # Map of requirements to list of extensions to load automatically when
901 # requirement is present.
911 # requirement is present.
902 autoextensions = {
912 autoextensions = {
903 b'git': [b'git'],
913 b'git': [b'git'],
904 b'largefiles': [b'largefiles'],
914 b'largefiles': [b'largefiles'],
905 b'lfs': [b'lfs'],
915 b'lfs': [b'lfs'],
906 }
916 }
907
917
908 for requirement, names in sorted(autoextensions.items()):
918 for requirement, names in sorted(autoextensions.items()):
909 if requirement not in requirements:
919 if requirement not in requirements:
910 continue
920 continue
911
921
912 for name in names:
922 for name in names:
913 if not ui.hasconfig(b'extensions', name):
923 if not ui.hasconfig(b'extensions', name):
914 ui.setconfig(b'extensions', name, b'', source=b'autoload')
924 ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
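

# Illustrative sketch (editor addition, not upstream code): an extension can
# extend the supported set by registering a callback in ``featuresetupfuncs``
# from its setup code. The requirement name below is hypothetical:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-example-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only callbacks defined by an extension enabled for this ``ui`` instance are
# executed by the loop above.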


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.

    Returns nothing if every requirement is recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
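

# Illustrative sketch (editor addition; the requirement name is made up):
#
#     ensurerequirementsrecognized(
#         {b'revlogv1', b'exp-frobnicate'}, {b'revlogv1'}
#     )
#
# raises ``error.RequirementError`` naming ``exp-frobnicate``, with a hint
# pointing at the MissingRequirement wiki page.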


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
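

# Decision table for the logic above (editor note, behavior unchanged):
#
#     requirements                  -> store class
#     store + fncache (+ dotencode) -> storemod.fncachestore
#     store only                    -> storemod.encodedstore
#     neither                       -> storemod.basicstore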


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
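

# Illustrative sketch (editor addition; values are hypothetical): for a
# modern repository the resolved options resemble
#
#     {
#         b'revlogv1': True,
#         b'generaldelta': True,
#         b'sparse-revlog': True,
#         b'copies-storage': b'extra',  # only if copies.write-to is set
#     }
#
# The storage layer later reads these through the store vfs options.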


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
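

# Worked example for the compression-requirement parsing above (editor
# addition): ``b'revlog-compression-zstd'.split(b'-', 2)[2]`` evaluates to
# ``b'zstd'``, so when several compression requirements are present the last
# one iterated wins in ``options[b'compengine']``.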


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
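

# Sketch of how these factories are consumed (editor addition; simplified
# from the ``makelocalrepository()`` behavior described above):
#
#     bases = []
#     for iface, fn in REPO_INTERFACES:
#         bases.append(fn()(requirements=requirements, features=features))
#     cls = type('derivedrepo', tuple(bases), {})
#
# The factories therefore return *types* such as ``localrepository`` and
# ``revlogfilestorage``, not instances.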


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
1338 """Create a new local repository instance.
1348 """Create a new local repository instance.
1339
1349
1340 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1350 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1341 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1351 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1342 object.
1352 object.
1343
1353
1344 Arguments:
1354 Arguments:
1345
1355
1346 baseui
1356 baseui
1347 ``ui.ui`` instance that ``ui`` argument was based off of.
1357 ``ui.ui`` instance that ``ui`` argument was based off of.
1348
1358
1349 ui
1359 ui
1350 ``ui.ui`` instance for use by the repository.
1360 ``ui.ui`` instance for use by the repository.
1351
1361
1352 origroot
1362 origroot
1353 ``bytes`` path to working directory root of this repository.
1363 ``bytes`` path to working directory root of this repository.
1354
1364
1355 wdirvfs
1365 wdirvfs
1356 ``vfs.vfs`` rooted at the working directory.
1366 ``vfs.vfs`` rooted at the working directory.
1357
1367
1358 hgvfs
1368 hgvfs
1359 ``vfs.vfs`` rooted at .hg/
1369 ``vfs.vfs`` rooted at .hg/
1360
1370
1361 requirements
1371 requirements
1362 ``set`` of bytestrings representing repository opening requirements.
1372 ``set`` of bytestrings representing repository opening requirements.
1363
1373
1364 supportedrequirements
1374 supportedrequirements
1365 ``set`` of bytestrings representing repository requirements that we
1375 ``set`` of bytestrings representing repository requirements that we
1366 know how to open. May be a supetset of ``requirements``.
1376 know how to open. May be a supetset of ``requirements``.
1367
1377
1368 sharedpath
1378 sharedpath
1369 ``bytes`` Defining path to storage base directory. Points to a
1379 ``bytes`` Defining path to storage base directory. Points to a
1370 ``.hg/`` directory somewhere.
1380 ``.hg/`` directory somewhere.
1371
1381
1372 store
1382 store
1373 ``store.basicstore`` (or derived) instance providing access to
1383 ``store.basicstore`` (or derived) instance providing access to
1374 versioned storage.
1384 versioned storage.
1375
1385
1376 cachevfs
1386 cachevfs
1377 ``vfs.vfs`` used for cache files.
1387 ``vfs.vfs`` used for cache files.
1378
1388
1379 wcachevfs
1389 wcachevfs
1380 ``vfs.vfs`` used for cache files related to the working copy.
1390 ``vfs.vfs`` used for cache files related to the working copy.
1381
1391
1382 features
1392 features
1383 ``set`` of bytestrings defining features/capabilities of this
1393 ``set`` of bytestrings defining features/capabilities of this
1384 instance.
1394 instance.
1385
1395
1386 intents
1396 intents
1387 ``set`` of system strings indicating what this repo will be used
1397 ``set`` of system strings indicating what this repo will be used
1388 for.
1398 for.
1389 """
1399 """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data was found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
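
    # Illustrative note (editor addition): because filtering always restarts
    # from the unfiltered repository, these two expressions yield equivalent
    # views:
    #
    #     repo.filtered(b'served')
    #     repo.filtered(b'visible').filtered(b'served')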

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction updates changelog to content B
        # 3) outside transaction updates bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)
1740
1750
1741 @repofilecache(b'dirstate')
1751 @repofilecache(b'dirstate')
1742 def dirstate(self):
1752 def dirstate(self):
1743 return self._makedirstate()
1753 return self._makedirstate()
1744
1754
1745 def _makedirstate(self):
1755 def _makedirstate(self):
1746 """Extension point for wrapping the dirstate per-repo."""
1756 """Extension point for wrapping the dirstate per-repo."""
1747 sparsematchfn = None
1757 sparsematchfn = None
1748 if sparse.use_sparse(self):
1758 if sparse.use_sparse(self):
1749 sparsematchfn = lambda: sparse.matcher(self)
1759 sparsematchfn = lambda: sparse.matcher(self)
1750 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1760 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1751 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1761 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1752 use_dirstate_v2 = v2_req in self.requirements
1762 use_dirstate_v2 = v2_req in self.requirements
1753 use_tracked_hint = th in self.requirements
1763 use_tracked_hint = th in self.requirements
1754
1764
1755 return dirstate.dirstate(
1765 return dirstate.dirstate(
1756 self.vfs,
1766 self.vfs,
1757 self.ui,
1767 self.ui,
1758 self.root,
1768 self.root,
1759 self._dirstatevalidate,
1769 self._dirstatevalidate,
1760 sparsematchfn,
1770 sparsematchfn,
1761 self.nodeconstants,
1771 self.nodeconstants,
1762 use_dirstate_v2,
1772 use_dirstate_v2,
1763 use_tracked_hint=use_tracked_hint,
1773 use_tracked_hint=use_tracked_hint,
1764 )
1774 )
1765
1775
1766 def _dirstatevalidate(self, node):
1776 def _dirstatevalidate(self, node):
1767 try:
1777 try:
1768 self.changelog.rev(node)
1778 self.changelog.rev(node)
1769 return node
1779 return node
1770 except error.LookupError:
1780 except error.LookupError:
1771 if not self._dirstatevalidatewarned:
1781 if not self._dirstatevalidatewarned:
1772 self._dirstatevalidatewarned = True
1782 self._dirstatevalidatewarned = True
1773 self.ui.warn(
1783 self.ui.warn(
1774 _(b"warning: ignoring unknown working parent %s!\n")
1784 _(b"warning: ignoring unknown working parent %s!\n")
1775 % short(node)
1785 % short(node)
1776 )
1786 )
1777 return self.nullid
1787 return self.nullid
1778
1788
1779 @storecache(narrowspec.FILENAME)
1789 @storecache(narrowspec.FILENAME)
1780 def narrowpats(self):
1790 def narrowpats(self):
1781 """matcher patterns for this repository's narrowspec
1791 """matcher patterns for this repository's narrowspec
1782
1792
1783 A tuple of (includes, excludes).
1793 A tuple of (includes, excludes).
1784 """
1794 """
1785 return narrowspec.load(self)
1795 return narrowspec.load(self)
1786
1796
1787 @storecache(narrowspec.FILENAME)
1797 @storecache(narrowspec.FILENAME)
1788 def _storenarrowmatch(self):
1798 def _storenarrowmatch(self):
1789 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1790 return matchmod.always()
1800 return matchmod.always()
1791 include, exclude = self.narrowpats
1801 include, exclude = self.narrowpats
1792 return narrowspec.match(self.root, include=include, exclude=exclude)
1802 return narrowspec.match(self.root, include=include, exclude=exclude)
1793
1803
1794 @storecache(narrowspec.FILENAME)
1804 @storecache(narrowspec.FILENAME)
1795 def _narrowmatch(self):
1805 def _narrowmatch(self):
1796 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1797 return matchmod.always()
1807 return matchmod.always()
1798 narrowspec.checkworkingcopynarrowspec(self)
1808 narrowspec.checkworkingcopynarrowspec(self)
1799 include, exclude = self.narrowpats
1809 include, exclude = self.narrowpats
1800 return narrowspec.match(self.root, include=include, exclude=exclude)
1810 return narrowspec.match(self.root, include=include, exclude=exclude)
1801
1811
1802 def narrowmatch(self, match=None, includeexact=False):
1812 def narrowmatch(self, match=None, includeexact=False):
1803 """matcher corresponding the the repo's narrowspec
1813 """matcher corresponding the the repo's narrowspec
1804
1814
1805 If `match` is given, then that will be intersected with the narrow
1815 If `match` is given, then that will be intersected with the narrow
1806 matcher.
1816 matcher.
1807
1817
1808 If `includeexact` is True, then any exact matches from `match` will
1818 If `includeexact` is True, then any exact matches from `match` will
1809 be included even if they're outside the narrowspec.
1819 be included even if they're outside the narrowspec.
1810 """
1820 """
1811 if match:
1821 if match:
1812 if includeexact and not self._narrowmatch.always():
1822 if includeexact and not self._narrowmatch.always():
1813 # do not exclude explicitly-specified paths so that they can
1823 # do not exclude explicitly-specified paths so that they can
1814 # be warned later on
1824 # be warned later on
1815 em = matchmod.exact(match.files())
1825 em = matchmod.exact(match.files())
1816 nm = matchmod.unionmatcher([self._narrowmatch, em])
1826 nm = matchmod.unionmatcher([self._narrowmatch, em])
1817 return matchmod.intersectmatchers(match, nm)
1827 return matchmod.intersectmatchers(match, nm)
1818 return matchmod.intersectmatchers(match, self._narrowmatch)
1828 return matchmod.intersectmatchers(match, self._narrowmatch)
1819 return self._narrowmatch
1829 return self._narrowmatch
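
    # Illustrative usage (sketch, not part of the module): intersecting a
    # caller-supplied matcher with the narrowspec; ``m`` is assumed to come
    # from something like ``scmutil.match(repo[None], pats)``.
    #
    #     nm = repo.narrowmatch(m, includeexact=True)
    #     tracked = [f for f in repo[b'.'] if nm(f)]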

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
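
    # Illustrative lookups (sketch): ``__getitem__`` accepts ints, 20-byte
    # binary nodes, 40-byte hex nodes, b'.', b'tip', None (working directory)
    # and slices, e.g.:
    #
    #     ctx = repo[0]          # by revision number
    #     ctx = repo[b'tip']     # symbolic name
    #     wctx = repo[None]      # working directory context
    #     ctxs = repo[-3:]       # slice, skipping filtered revisions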

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
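
    # Illustrative revset queries (sketch): ``expr`` may use the %-formatting
    # documented in ``revsetlang.formatspec`` (%d for an int rev, %ln for a
    # list of binary nodes, ...); ``nodes`` is an assumed caller variable.
    #
    #     for rev in repo.revs(b'%d::%d', 0, 5):
    #         ...
    #     for ctx in repo.set(b'heads(%ln)', nodes):
    #         ...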

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
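
    # Illustrative alias handling (sketch): expand user revset aliases while
    # overriding one locally; the ``mine`` alias here is hypothetical.
    #
    #     revs = repo.anyrevs(
    #         [b'mine() and not public()'],
    #         user=True,
    #         localalias={b'mine': b'author(alice)'},
    #     )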

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
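
    # Illustrative invocation (sketch): fire a hypothetical custom hook; each
    # keyword argument becomes an HG_* environment variable for shell hooks.
    #
    #     repo.hook(b'myextension-event', throw=False, node=hex(somenode))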

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)
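
    # Illustrative path handling (sketch): ``wjoin`` yields an absolute
    # working-directory path, ``pathto`` a path relative to ``cwd``.
    #
    #     abspath = repo.wjoin(b'dir', b'file.txt')
    #     relpath = repo.pathto(b'dir/file.txt', cwd=repo.getcwd())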

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
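
    # Illustrative configuration (sketch): ``_loadfilter`` reads patterns
    # from the ``[encode]``/``[decode]`` hgrc sections, along these lines
    # (the ``pipe:`` prefix selects a stdin/stdout filter; see
    # ``procutil.filter``):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #     [decode]
    #     *.gz = pipe: gzip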

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
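
    # Illustrative write (sketch): ``flags`` selects the file type -- b'l'
    # writes a symlink, b'x' sets the executable bit, b'' a plain file.
    #
    #     n = repo.wwrite(b'hello.txt', b'hello\n', b'')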

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
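
    # Illustrative pattern (sketch): a transaction requires the repo lock;
    # ``currenttransaction()`` exposes the live transaction, if any.
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             assert repo.currenttransaction() is tr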

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with the case where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature, so we disable it by default. The flag will
        # be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
2406 tracktags = lambda x: None
2423 tracktags = lambda x: None
2407 # experimental config: experimental.hook-track-tags
2424 # experimental config: experimental.hook-track-tags
2408 shouldtracktags = self.ui.configbool(
2425 shouldtracktags = self.ui.configbool(
2409 b'experimental', b'hook-track-tags'
2426 b'experimental', b'hook-track-tags'
2410 )
2427 )
2411 if desc != b'strip' and shouldtracktags:
2428 if desc != b'strip' and shouldtracktags:
2412 oldheads = self.changelog.headrevs()
2429 oldheads = self.changelog.headrevs()
2413
2430
2414 def tracktags(tr2):
2431 def tracktags(tr2):
2415 repo = reporef()
2432 repo = reporef()
2416 assert repo is not None # help pytype
2433 assert repo is not None # help pytype
2417 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2434 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2418 newheads = repo.changelog.headrevs()
2435 newheads = repo.changelog.headrevs()
2419 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2436 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2420 # notes: we compare lists here.
2437 # notes: we compare lists here.
2421 # As we do it only once buiding set would not be cheaper
2438 # As we do it only once buiding set would not be cheaper
2422 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2439 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2423 if changes:
2440 if changes:
2424 tr2.hookargs[b'tag_moved'] = b'1'
2441 tr2.hookargs[b'tag_moved'] = b'1'
2425 with repo.vfs(
2442 with repo.vfs(
2426 b'changes/tags.changes', b'w', atomictemp=True
2443 b'changes/tags.changes', b'w', atomictemp=True
2427 ) as changesfile:
2444 ) as changesfile:
2428 # note: we do not register the file to the transaction
2445 # note: we do not register the file to the transaction
2429 # because we needs it to still exist on the transaction
2446 # because we needs it to still exist on the transaction
2430 # is close (for txnclose hooks)
2447 # is close (for txnclose hooks)
2431 tagsmod.writediff(changesfile, changes)
2448 tagsmod.writediff(changesfile, changes)
2432
2449
2433 def validate(tr2):
2450 def validate(tr2):
2434 """will run pre-closing hooks"""
2451 """will run pre-closing hooks"""
2435 # XXX the transaction API is a bit lacking here so we take a hacky
2452 # XXX the transaction API is a bit lacking here so we take a hacky
2436 # path for now
2453 # path for now
2437 #
2454 #
2438 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2455 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2439 # dict is copied before these run. In addition we needs the data
2456 # dict is copied before these run. In addition we needs the data
2440 # available to in memory hooks too.
2457 # available to in memory hooks too.
2441 #
2458 #
2442 # Moreover, we also need to make sure this runs before txnclose
2459 # Moreover, we also need to make sure this runs before txnclose
2443 # hooks and there is no "pending" mechanism that would execute
2460 # hooks and there is no "pending" mechanism that would execute
2444 # logic only if hooks are about to run.
2461 # logic only if hooks are about to run.
2445 #
2462 #
2446 # Fixing this limitation of the transaction is also needed to track
2463 # Fixing this limitation of the transaction is also needed to track
2447 # other families of changes (bookmarks, phases, obsolescence).
2464 # other families of changes (bookmarks, phases, obsolescence).
2448 #
2465 #
2449 # This will have to be fixed before we remove the experimental
2466 # This will have to be fixed before we remove the experimental
2450 # gating.
2467 # gating.
2451 tracktags(tr2)
2468 tracktags(tr2)
2452 repo = reporef()
2469 repo = reporef()
2453 assert repo is not None # help pytype
2470 assert repo is not None # help pytype
2454
2471
2455 singleheadopt = (b'experimental', b'single-head-per-branch')
2472 singleheadopt = (b'experimental', b'single-head-per-branch')
2456 singlehead = repo.ui.configbool(*singleheadopt)
2473 singlehead = repo.ui.configbool(*singleheadopt)
2457 if singlehead:
2474 if singlehead:
2458 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2475 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2459 accountclosed = singleheadsub.get(
2476 accountclosed = singleheadsub.get(
2460 b"account-closed-heads", False
2477 b"account-closed-heads", False
2461 )
2478 )
2462 if singleheadsub.get(b"public-changes-only", False):
2479 if singleheadsub.get(b"public-changes-only", False):
2463 filtername = b"immutable"
2480 filtername = b"immutable"
2464 else:
2481 else:
2465 filtername = b"visible"
2482 filtername = b"visible"
2466 scmutil.enforcesinglehead(
2483 scmutil.enforcesinglehead(
2467 repo, tr2, desc, accountclosed, filtername
2484 repo, tr2, desc, accountclosed, filtername
2468 )
2485 )
2469 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2486 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2470 for name, (old, new) in sorted(
2487 for name, (old, new) in sorted(
2471 tr.changes[b'bookmarks'].items()
2488 tr.changes[b'bookmarks'].items()
2472 ):
2489 ):
2473 args = tr.hookargs.copy()
2490 args = tr.hookargs.copy()
2474 args.update(bookmarks.preparehookargs(name, old, new))
2491 args.update(bookmarks.preparehookargs(name, old, new))
2475 repo.hook(
2492 repo.hook(
2476 b'pretxnclose-bookmark',
2493 b'pretxnclose-bookmark',
2477 throw=True,
2494 throw=True,
2478 **pycompat.strkwargs(args)
2495 **pycompat.strkwargs(args)
2479 )
2496 )
2480 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2497 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2481 cl = repo.unfiltered().changelog
2498 cl = repo.unfiltered().changelog
2482 for revs, (old, new) in tr.changes[b'phases']:
2499 for revs, (old, new) in tr.changes[b'phases']:
2483 for rev in revs:
2500 for rev in revs:
2484 args = tr.hookargs.copy()
2501 args = tr.hookargs.copy()
2485 node = hex(cl.node(rev))
2502 node = hex(cl.node(rev))
2486 args.update(phases.preparehookargs(node, old, new))
2503 args.update(phases.preparehookargs(node, old, new))
2487 repo.hook(
2504 repo.hook(
2488 b'pretxnclose-phase',
2505 b'pretxnclose-phase',
2489 throw=True,
2506 throw=True,
2490 **pycompat.strkwargs(args)
2507 **pycompat.strkwargs(args)
2491 )
2508 )
2492
2509
2493 repo.hook(
2510 repo.hook(
2494 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2511 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2495 )
2512 )
2496
2513
2497 def releasefn(tr, success):
2514 def releasefn(tr, success):
2498 repo = reporef()
2515 repo = reporef()
2499 if repo is None:
2516 if repo is None:
2500 # If the repo has been GC'd (and this release function is being
2517 # If the repo has been GC'd (and this release function is being
2501 # called from transaction.__del__), there's not much we can do,
2518 # called from transaction.__del__), there's not much we can do,
2502 # so just leave the unfinished transaction there and let the
2519 # so just leave the unfinished transaction there and let the
2503 # user run `hg recover`.
2520 # user run `hg recover`.
2504 return
2521 return
2505 if success:
2522 if success:
2506 # this should be explicitly invoked here, because
2523 # this should be explicitly invoked here, because
2507 # in-memory changes aren't written out at closing
2524 # in-memory changes aren't written out at closing
2508 # transaction, if tr.addfilegenerator (via
2525 # transaction, if tr.addfilegenerator (via
2509 # dirstate.write or so) isn't invoked while
2526 # dirstate.write or so) isn't invoked while
2510 # transaction running
2527 # transaction running
2511 repo.dirstate.write(None)
2528 repo.dirstate.write(None)
2512 else:
2529 else:
2513 # discard all changes (including ones already written
2530 # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

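    # Illustrative sketch (not part of the original code): an extension can
    # observe the transaction-close events scheduled above by registering an
    # in-process hook. The hook name and keyword arguments follow
    # txnclosehook(); the function and hook-entry names are hypothetical:
    #
    #     def log_txnclose(ui, repo, hooktype, txnname=None, **kwargs):
    #         ui.debug(b'transaction %s closed\n' % txnname)
    #
    #     repo.ui.setconfig(b'hooks', b'txnclose.logger', log_txnclose)
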
    def _journalfiles(self):
        first = (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
        )
        middle = []
        dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
        if dirstate_data is not None:
            middle.append((self.vfs, dirstate_data))
        end = (
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )
        return first + tuple(middle) + end

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

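    # Illustrative sketch (assumption: undoname(), defined elsewhere in this
    # module, maps the b'journal' prefix to b'undo'), showing how the journal
    # files above correspond to the undo files consumed by rollback:
    #
    #     (svfs, b'journal')            -> (svfs, b'undo')
    #     (vfs,  b'journal.dirstate')   -> (vfs,  b'undo.dirstate')
    #     (svfs, b'journal.phaseroots') -> (svfs, b'undo.phaseroots')
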
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

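    # Illustrative sketch (not part of the original code): rollback() returns
    # 0 on success and 1 when no undo information exists, so a dry run can
    # probe for rollback data without touching the repository:
    #
    #     if repo.rollback(dryrun=True) == 0:
    #         repo.rollback()
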
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming is made after a clone and some of the slower
        caches might be skipped, namely the `.fnodetags` one. This argument is
        5.8 specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

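    # Illustrative sketch (not part of the original code), following the
    # deprecation notice above: callers should select cache sets explicitly
    # instead of passing `full=True`:
    #
    #     repo.updatecaches(caches=repository.CACHES_ALL)      # everything
    #     repo.updatecaches(caches=repository.CACHES_DEFAULT)  # the usual set
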
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

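    # Illustrative sketch (not part of the original code): callbacks passed to
    # _afterlock() receive a single "success" boolean, as the direct
    # callback(True) call above shows. This mirrors how commit() defers its
    # 'commit' hook; the function name here is hypothetical:
    #
    #     def notify(unused_success):
    #         repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)
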
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

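    # Illustrative sketch (not part of the original code): the ordering rule
    # from the docstrings above, taking 'wlock' before 'lock', matching how
    # commit() acquires both:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store here
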
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs, in which case the 'commit' hook is skipped
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

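    # Illustrative sketch (not part of the original code): a
    # post-dirstate-status callback as described in the docstring above. The
    # callback receives the working context and the status object, and reads
    # repository state through wctx.repo() rather than a captured copy:
    #
    #     def fixup(wctx, status):
    #         for f in status.modified:
    #             wctx.repo().ui.debug(b'modified: %s\n' % f)
    #
    #     repo.addpostdsstatus(fixup)
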
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

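    # Illustrative note (not part of the original code): between() samples
    # first parents at exponentially growing distances, since a node is
    # recorded whenever the step counter i equals f and f doubles after each
    # hit. For a linear chain top -> ... -> bottom, the returned list for a
    # pair therefore holds the ancestors 1, 2, 4, 8, ... steps below top
    # (top itself and bottom excluded).
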
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote, and outgoing) before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

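    # Editorial note: the pushkey API from a caller's point of view; keys and
    # values are bytes throughout. The bookmark name and node below are
    # hypothetical; for bookmarks, an empty ``old`` value requests creation:
    #
    #     books = repo.listkeys(b'bookmarks')  # {bookmark name -> hex node}
    #     ok = repo.pushkey(b'bookmarks', b'mybook', b'', hexnode)
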
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


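# Editorial note: a sketch of how an extension might use the two sidedata
# registration methods above. The category, computer signature and flags
# value here are illustrative assumptions modelled on the computers that
# Mercurial core registers:
#
#     def compute_example(repo, store, rev, prev_sidedata):
#         # returns the new sidedata dict plus (flags_to_add, flags_to_remove)
#         return {sidedatamod.SD_TEST1: b'...'}, (0, 0)
#
#     def reposetup(ui, repo):
#         repo.register_wanted_sidedata(b'example')
#         repo.register_sidedata_computer(
#             revlogconst.KIND_CHANGELOG,
#             b'example',
#             (sidedatamod.SD_TEST1,),
#             compute_example,
#             0,
#         )
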
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


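# For reference, the journal -> undo mapping this performs (the paths are
# illustrative):
#
#     undoname(b'/repo/.hg/store/journal')    -> b'/repo/.hg/store/undo'
#     undoname(b'/repo/.hg/journal.dirstate') -> b'/repo/.hg/undo.dirstate'

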
def instance(ui, path: bytes, create, intents=None, createopts=None):

    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


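# Editorial note: in other words, for a local clone the store side of the
# requirements mirrors the source repository while the working-copy side
# follows the local configuration. A hypothetical illustration:
#
#     reqs = clone_requirements(ui, None, srcrepo)
#     # store formats (e.g. b'revlogv1', b'store') come from srcrepo;
#     # e.g. b'dirstate-v2' is included only if the local config enables it

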
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


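# Editorial note: a rough illustration of the result under stock settings;
# the exact set depends on the installed compression engines and on the
# defaults of the Mercurial version in use:
#
#     opts = defaultcreateopts(ui)
#     newreporequirements(ui, opts)
#     # e.g. {b'revlogv1', b'store', b'fncache', b'dotencode',
#     #       b'generaldelta', b'sparserevlog', ...}

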
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


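# Editorial note: a sketch of the wrapping described in the docstring above;
# the extension and its b'myfeature' option are hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def _filterknown(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myfeature', None)  # claim this option as handled
#         return unknown
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'filterknowncreateopts', _filterknown
#         )

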
def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


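# Editorial note: typical usage, sketched; the path is hypothetical and, as
# elsewhere in these APIs, paths are bytes:
#
#     createrepository(ui, b'/tmp/newrepo')
#     repo = instance(ui, b'/tmp/newrepo', create=False)

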
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
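
# Editorial note: the resulting behavior, sketched:
#
#     poisonrepository(repo)
#     repo.close()      # still permitted (a no-op)
#     repo.changelog    # raises error.ProgrammingError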