rollback: remove the dirstateguard usage...
marmoute
r50966:d91fc026 (default branch)
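
This changeset drops localrepo.py's import of the dirstateguard module. Judging from the visible hunk (and the RE_SKIP_DIRSTATE_ROLLBACK pattern retained near the top of the file), rollback now leaves dirstate backup and restore to the transaction machinery instead of an explicit guard object. For context, here is a minimal sketch of the guard pattern being retired, assuming the classic dirstateguard API; the _do_rollback helper is hypothetical:

    # Sketch only, not part of this diff. A dirstateguard backs up
    # .hg/dirstate when created; the caller must close() it on success,
    # and release() restores the backup if close() was never reached.
    from mercurial import dirstateguard

    def rollback_with_guard(repo):
        dsguard = dirstateguard.dirstateguard(repo, b'rollback')
        try:
            _do_rollback(repo)  # hypothetical: the guarded operation
            dsguard.close()     # success: discard the dirstate backup
        finally:
            dsguard.release()   # not closed: restore dirstate backup
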
@@ -1,3990 +1,3984 @@
 # localrepo.py - read/write repository class for mercurial
 # coding: utf-8
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import functools
 import os
 import random
 import re
 import sys
 import time
 import weakref

 from concurrent import futures
 from typing import (
     Optional,
 )

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     sha1nodeconstants,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
-    dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revlog,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
     wireprototypes,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     hashutil,
     procutil,
     stringutil,
     urlutil,
 )

 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
     sidedata as sidedatamod,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()


 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered"""

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)


 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""

     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)


 class storecache(_basefilecache):
     """filecache for files in the store"""

     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))

     def join(self, obj, fname):
         return obj.sjoin(fname)


 class changelogcache(storecache):
     """filecache for the changelog"""

     def __init__(self):
         super(changelogcache, self).__init__()
         _cachedfiles.add((b'00changelog.i', b''))
         _cachedfiles.add((b'00changelog.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00changelog.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00changelog.n'))
         return paths


 class manifestlogcache(storecache):
     """filecache for the manifestlog"""

     def __init__(self):
         super(manifestlogcache, self).__init__()
         _cachedfiles.add((b'00manifest.i', b''))
         _cachedfiles.add((b'00manifest.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00manifest.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00manifest.n'))
         return paths


 class mixedrepostorecache(_basefilecache):
     """filecache for a mix of files in .hg/store and outside"""

     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)


 def isfilecached(repo, name):
     """check if a repo has already cached the "name" filecache-ed property

     This returns a (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True


 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to the unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)


 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())


 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""

     @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)

     return wrapper


 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})


 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor:
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )

         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True


 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None, path=None):
         super(localpeer, self).__init__(repo.ui, path=path)

         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')

         if repo._wanted_sidedata:
             formatted = bundle2.format_remote_wanted_sidedata(repo)
             caps.add(b'exp-wanted-sidedata=' + formatted)

         self._caps = repo._restrictcapabilities(caps)

     # Begin of _basepeer interface.

     def url(self):
         return self._repo.url()

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def close(self):
         self._repo.close()

     # End of _basepeer interface.

     # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

     def capabilities(self):
         return self._caps

     def clonebundles(self):
         return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )

     def getbundle(
         self,
         source,
         heads=None,
         common=None,
         bundlecaps=None,
         remote_sidedata=None,
         **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             remote_sidedata=remote_sidedata,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def lookup(self, key):
         return self._repo.lookup(key)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.


 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     """peer extension which implements legacy methods too; used for tests with
     restricted capabilities"""

     def __init__(self, repo, path=None):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

     # Begin of baselegacywirecommands interface.

     def between(self, pairs):
         return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     # End of baselegacywirecommands interface.


 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()


 def _getsharedvfs(hgvfs, requirements):
     """returns the vfs object pointing to the root of the shared source
     repo for a shared repository

     hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
     requirements is a set of requirements of the current repo (the shared one)
     """
     # The ``shared`` or ``relshared`` requirements indicate the
     # store lives in the path contained in the ``.hg/sharedpath`` file.
     # This is an absolute path for ``shared`` and relative to
     # ``.hg/`` for ``relshared``.
     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
     if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
         sharedpath = util.normpath(hgvfs.join(sharedpath))

     sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

     if not sharedvfs.exists():
         raise error.RepoError(
             _(b'.hg/sharedpath points to nonexistent directory %s')
             % sharedvfs.base
         )
     return sharedvfs


 def _readrequires(vfs, allowmissing):
     """reads the requires file present at the root of this vfs
     and returns a set of requirements

     If allowmissing is True, we suppress FileNotFoundError if raised"""
     # requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     read = vfs.tryread if allowmissing else vfs.read
     return set(read(b'requires').splitlines())


 def makelocalrepository(baseui, path: bytes, intents=None):
     """Create a local repository object.

     Given arguments needed to construct a local repository, this function
     performs various early repository loading functionality (such as
     reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
     the repository can be opened, derives a type suitable for representing
     that repository, and returns an instance of it.

     The returned object conforms to the ``repository.completelocalrepository``
     interface.

     The repository type is derived by calling a series of factory functions
     for each aspect/interface of the final repository. These are defined by
     ``REPO_INTERFACES``.

     Each factory function is called to produce a type implementing a specific
     interface. The cumulative list of returned types will be combined into a
     new type and that type will be instantiated to represent the local
     repository.

     The factory functions each receive various state that may be consulted
     as part of deriving a type.

     Extensions should wrap these factory functions to customize repository type
     creation. Note that an extension's wrapped function may be called even if
     that extension is not loaded for the repo being constructed. Extensions
     should check if their ``__name__`` appears in the
     ``extensionmodulenames`` set passed to the factory function and no-op if
     not.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy

     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
     # Whether this repository is a shared one or not
     shared = False
     # If this repository is shared, the vfs pointing to the shared repo
     sharedvfs = None

     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except FileNotFoundError:
             pass
         except ValueError as e:
             # Can be raised on Python 3.8 when path is invalid.
             raise error.Abort(
                 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
             )

         raise error.RepoError(_(b'repository %s not found') % path)

     requirements = _readrequires(hgvfs, True)
     shared = (
         requirementsmod.SHARED_REQUIREMENT in requirements
         or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
     )
     storevfs = None
     if shared:
         # This is a shared repo
         sharedvfs = _getsharedvfs(hgvfs, requirements)
         storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
     else:
         storevfs = vfsmod.vfs(hgvfs.join(b'store'))

     # if .hg/requires contains the sharesafe requirement, it means
     # there exists a `.hg/store/requires` too and we should read it
     # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
     # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
     # if store is not present, refer to checkrequirementscompat() for that
     #
     # However, if SHARESAFE_REQUIREMENT is not present, it means that the
     # repository was shared the old way. We check the share source .hg/requires
     # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
     # to be reshared
     hint = _(b"see `hg help config.format.use-share-safe` for more information")
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
         if (
             shared
             and requirementsmod.SHARESAFE_REQUIREMENT
             not in _readrequires(sharedvfs, True)
         ):
             mismatch_warn = ui.configbool(
                 b'share', b'safe-mismatch.source-not-safe.warn'
             )
             mismatch_config = ui.config(
                 b'share', b'safe-mismatch.source-not-safe'
             )
             mismatch_verbose_upgrade = ui.configbool(
                 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
             )
             if mismatch_config in (
                 b'downgrade-allow',
                 b'allow',
                 b'downgrade-abort',
             ):
                 # prevent cyclic import localrepo -> upgrade -> localrepo
                 from . import upgrade

                 upgrade.downgrade_share_to_non_safe(
                     ui,
                     hgvfs,
                     sharedvfs,
                     requirements,
                     mismatch_config,
                     mismatch_warn,
                     mismatch_verbose_upgrade,
                 )
             elif mismatch_config == b'abort':
                 raise error.Abort(
                     _(b"share source does not support share-safe requirement"),
                     hint=hint,
                 )
             else:
                 raise error.Abort(
                     _(
                         b"share-safe mismatch with source.\nUnrecognized"
                         b" value '%s' of `share.safe-mismatch.source-not-safe`"
                         b" set."
                     )
                     % mismatch_config,
                     hint=hint,
                 )
         else:
             requirements |= _readrequires(storevfs, False)
     elif shared:
         sourcerequires = _readrequires(sharedvfs, False)
         if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
             mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
             mismatch_warn = ui.configbool(
                 b'share', b'safe-mismatch.source-safe.warn'
             )
             mismatch_verbose_upgrade = ui.configbool(
                 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
             )
             if mismatch_config in (
                 b'upgrade-allow',
                 b'allow',
                 b'upgrade-abort',
             ):
                 # prevent cyclic import localrepo -> upgrade -> localrepo
                 from . import upgrade

                 upgrade.upgrade_share_to_safe(
                     ui,
                     hgvfs,
                     storevfs,
                     requirements,
                     mismatch_config,
                     mismatch_warn,
                     mismatch_verbose_upgrade,
                 )
             elif mismatch_config == b'abort':
                 raise error.Abort(
                     _(
                         b'version mismatch: source uses share-safe'
                         b' functionality while the current share does not'
                     ),
                     hint=hint,
                 )
             else:
                 raise error.Abort(
                     _(
                         b"share-safe mismatch with source.\nUnrecognized"
                         b" value '%s' of `share.safe-mismatch.source-safe` set."
                     )
                     % mismatch_config,
                     hint=hint,
                 )

     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
         afterhgrcload(ui, wdirvfs, hgvfs, requirements)
     extensions.loadall(ui)
     extensions.populateui(ui)

     # Set of module names of extensions loaded for this repository.
     extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

     supportedrequirements = gathersupportedrequirements(ui)

     # We first validate the requirements are known.
     ensurerequirementsrecognized(requirements, supportedrequirements)

     # Then we validate that the known set is reasonable to use together.
     ensurerequirementscompatible(ui, requirements)

     # TODO there are unhandled edge cases related to opening repositories with
     # shared storage. If storage is shared, we should also test for requirements
     # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
     # that repo, as that repo may load extensions needed to open it. This is a
     # bit complicated because we don't want the other hgrc to overwrite settings
     # in this hgrc.
     #
     # This bug is somewhat mitigated by the fact that we copy the .hg/requires
     # file when sharing repos. But if a requirement is added after the share is
     # performed, thereby introducing a new requirement for the opener, we may
     # not see that and could encounter a run-time error interacting with
     # that shared store since it has an unknown-to-us requirement.

     # At this point, we know we should be capable of opening the repository.
     # Now get on with doing that.

     features = set()

     # The "store" part of the repository holds versioned data. How it is
     # accessed is determined by various requirements. If the `shared` or
     # `relshared` requirements are present, this indicates the current
     # repository is a share and the store exists in the path mentioned in
     # `.hg/sharedpath`
     if shared:
         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
         features.add(repository.REPO_FEATURE_SHARED_STORAGE)
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
     wcachepath = hgvfs.join(b'wcache')

     # The store has changed over time and the exact layout is dictated by
     # requirements. The store interface abstracts differences across all
     # of them.
     store = makestore(
         requirements,
         storebasepath,
         lambda base: vfsmod.vfs(base, cacheaudited=True),
     )
     hgvfs.createmode = store.createmode

     storevfs = store.vfs
     storevfs.options = resolvestorevfsoptions(ui, requirements, features)

     if (
         requirementsmod.REVLOGV2_REQUIREMENT in requirements
         or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
     ):
         features.add(repository.REPO_FEATURE_SIDE_DATA)
         # the revlogv2 docket introduced a race condition that we need to fix
         features.discard(repository.REPO_FEATURE_STREAM_CLONE)

     # The cache vfs is used to manage cache files.
     cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
     cachevfs.createmode = store.createmode
     # The wcache vfs is used to manage cache files related to the working copy
     wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
     wcachevfs.createmode = store.createmode

     # Now resolve the type for the repository object. We do this by repeatedly
     # calling a factory function to produce types for specific aspects of the
     # repo's operation. The aggregate returned types are used as base classes
     # for a dynamically-derived type, which will represent our new repository.

     bases = []
     extrastate = {}

     for iface, fn in REPO_INTERFACES:
         # We pass all potentially useful state to give extensions tons of
         # flexibility.
         typ = fn()(
             ui=ui,
             intents=intents,
             requirements=requirements,
             features=features,
             wdirvfs=wdirvfs,
             hgvfs=hgvfs,
             store=store,
             storevfs=storevfs,
             storeoptions=storevfs.options,
             cachevfs=cachevfs,
             wcachevfs=wcachevfs,
             extensionmodulenames=extensionmodulenames,
             extrastate=extrastate,
             baseclasses=bases,
         )

         if not isinstance(typ, type):
             raise error.ProgrammingError(
                 b'unable to construct type for %s' % iface
             )

         bases.append(typ)

     # type() allows you to use characters in type names that wouldn't be
     # recognized as Python symbols in source code. We abuse that to add
     # rich information about our constructed repo.
     name = pycompat.sysstr(
         b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
     )

     cls = type(name, tuple(bases), {})

     return cls(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
         requirements=requirements,
         supportedrequirements=supportedrequirements,
         sharedpath=storebasepath,
         store=store,
         cachevfs=cachevfs,
         wcachevfs=wcachevfs,
         features=features,
         intents=intents,
     )


850 def loadhgrc(
849 def loadhgrc(
851 ui,
850 ui,
852 wdirvfs: vfsmod.vfs,
851 wdirvfs: vfsmod.vfs,
853 hgvfs: vfsmod.vfs,
852 hgvfs: vfsmod.vfs,
854 requirements,
853 requirements,
855 sharedvfs: Optional[vfsmod.vfs] = None,
854 sharedvfs: Optional[vfsmod.vfs] = None,
856 ):
855 ):
857 """Load hgrc files/content into a ui instance.
856 """Load hgrc files/content into a ui instance.
858
857
859 This is called during repository opening to load any additional
858 This is called during repository opening to load any additional
860 config files or settings relevant to the current repository.
859 config files or settings relevant to the current repository.
861
860
862 Returns a bool indicating whether any additional configs were loaded.
861 Returns a bool indicating whether any additional configs were loaded.
863
862
864 Extensions should monkeypatch this function to modify how per-repo
863 Extensions should monkeypatch this function to modify how per-repo
865 configs are loaded. For example, an extension may wish to pull in
864 configs are loaded. For example, an extension may wish to pull in
866 configs from alternate files or sources.
865 configs from alternate files or sources.
867
866
868 sharedvfs is vfs object pointing to source repo if the current one is a
867 sharedvfs is vfs object pointing to source repo if the current one is a
869 shared one
868 shared one
870 """
869 """
871 if not rcutil.use_repo_hgrc():
870 if not rcutil.use_repo_hgrc():
872 return False
871 return False
873
872
874 ret = False
873 ret = False
875 # first load config from shared source if we has to
874 # first load config from shared source if we has to
876 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
875 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
877 try:
876 try:
878 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
877 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
879 ret = True
878 ret = True
880 except IOError:
879 except IOError:
881 pass
880 pass
882
881
883 try:
882 try:
884 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
883 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
885 ret = True
884 ret = True
886 except IOError:
885 except IOError:
887 pass
886 pass
888
887
889 try:
888 try:
890 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
889 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
891 ret = True
890 ret = True
892 except IOError:
891 except IOError:
893 pass
892 pass
894
893
895 return ret
894 return ret
896
895
897
896
898 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
897 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
899 """Perform additional actions after .hg/hgrc is loaded.
898 """Perform additional actions after .hg/hgrc is loaded.
900
899
901 This function is called during repository loading immediately after
900 This function is called during repository loading immediately after
902 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
901 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
903
902
904 The function can be used to validate configs, automatically add
903 The function can be used to validate configs, automatically add
905 options (including extensions) based on requirements, etc.
904 options (including extensions) based on requirements, etc.
906 """
905 """
907
906
908 # Map of requirements to list of extensions to load automatically when
907 # Map of requirements to list of extensions to load automatically when
909 # requirement is present.
908 # requirement is present.
910 autoextensions = {
909 autoextensions = {
911 b'git': [b'git'],
910 b'git': [b'git'],
912 b'largefiles': [b'largefiles'],
911 b'largefiles': [b'largefiles'],
913 b'lfs': [b'lfs'],
912 b'lfs': [b'lfs'],
914 }
913 }
915
914
916 for requirement, names in sorted(autoextensions.items()):
915 for requirement, names in sorted(autoextensions.items()):
917 if requirement not in requirements:
916 if requirement not in requirements:
918 continue
917 continue
919
918
920 for name in names:
919 for name in names:
921 if not ui.hasconfig(b'extensions', name):
920 if not ui.hasconfig(b'extensions', name):
922 ui.setconfig(b'extensions', name, b'', source=b'autoload')
921 ui.setconfig(b'extensions', name, b'', source=b'autoload')
923
922
924
923
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


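# Illustrative sketch (editor's addition): an extension can advertise an
# extra supported requirement by registering a feature-setup function in
# the module-level ``featuresetupfuncs`` set consulted above;
# b'exp-myfeature' is a hypothetical requirement name.
def _example_featuresetup(ui, supported):
    supported.add(b'exp-myfeature')
# Registered from the extension module, typically at load time:
#     localrepo.featuresetupfuncs.add(_example_featuresetup)

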
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    (a subclass of ``error.RepoError``) if any requirement in that set is
    not recognized by currently loaded code.

    Returns ``None``; the check passes silently when every requirement is
    recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


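# Illustrative usage sketch (editor's addition); b'frobnicate' is a made-up
# requirement that no loaded code recognizes:
#     supported = gathersupportedrequirements(ui)
#     reqs = {requirementsmod.REVLOGV1_REQUIREMENT, b'frobnicate'}
#     ensurerequirementsrecognized(reqs, supported)
#     # -> raises error.RequirementError naming b'frobnicate'

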
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


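# Illustrative sketch (editor's addition): per the docstring above, an
# extension may add its own compatibility check by wrapping this function;
# b'exp-frozen-repo' is a hypothetical requirement.
def _example_compatibility_check(orig, ui, requirements):
    orig(ui, requirements)
    if b'exp-frozen-repo' in requirements:
        raise error.RepoError(_(b'frozen repositories cannot be opened'))
# Activated from an extension via:
#     extensions.wrapfunction(
#         localrepo, 'ensurerequirementscompatible',
#         _example_compatibility_check,
#     )

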
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


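# Selection summary (editor's note, derived from the function above):
# 'store' + 'fncache' requirements yield a fncachestore (honoring
# 'dotencode' when present); 'store' alone yields an encodedstore;
# repositories with neither (very old formats) fall back to a basicstore.

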
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


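# Example shape of the returned options (editor's sketch; exact keys and
# values depend on the repository's requirements and configuration):
#     {b'revlogv1': True, b'generaldelta': True,
#      b'copies-storage': b'changeset-sidedata', ...}

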
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


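# Illustrative hgrc sketch (editor's addition) touching some of the knobs
# consulted above; the values are examples, not recommendations:
#     [storage]
#     revlog.zstd.level = 3
#     revlog.persistent-nodemap.slow-path = warn
#     dirstate-v2.slow-path = abort
#     [experimental]
#     sparse-read = yes
#     maxdeltachainspan = 4M

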
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


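# Simplified sketch (editor's addition) of how ``makelocalrepository()``
# consumes this list; the real code passes far more state to each factory:
#     bases = []
#     for iface, factoryfn in REPO_INTERFACES:
#         bases.append(factoryfn()(requirements=..., features=...))
#     cls = type('derivedrepo', tuple(bases), {})

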
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

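    # Illustrative sketch (editor's addition): per the note above, an
    # extension tracking a hypothetical '.hg/myext-state' file without the
    # wlock would register its prefix, e.g. from reposetup():
    #     localrepo.localrepository._wlockfreeprefix.add(b'myext-state')
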
    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that
           we know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter
        # value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None):
        return localpeer(self, path=path)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

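    # Illustrative usage sketch (editor's addition); b'served' and
    # b'visible' are standard repoview filter names:
    #     served = repo.filtered(b'served')    # hides secret/hidden csets
    #     raw = served.unfiltered()            # back to the unfiltered repo
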
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) the outside transaction updates the bookmark file to refer to
        #    content B
        # 4) the bookmarks file content is read and filtered against
        #    changelog A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks`
        # in (4), the changelog file has already diverged from the content
        # used for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

1741 @changelogcache()
1740 @changelogcache()
1742 def changelog(repo):
1741 def changelog(repo):
1743 # load dirstate before changelog to avoid race see issue6303
1742 # load dirstate before changelog to avoid race see issue6303
1744 repo.dirstate.prefetch_parents()
1743 repo.dirstate.prefetch_parents()
1745 return repo.store.changelog(
1744 return repo.store.changelog(
1746 txnutil.mayhavepending(repo.root),
1745 txnutil.mayhavepending(repo.root),
1747 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1746 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1748 )
1747 )
1749
1748
1750 @manifestlogcache()
1749 @manifestlogcache()
1751 def manifestlog(self):
1750 def manifestlog(self):
1752 return self.store.manifestlog(self, self._storenarrowmatch)
1751 return self.store.manifestlog(self, self._storenarrowmatch)
1753
1752
1754 @repofilecache(b'dirstate')
1753 @repofilecache(b'dirstate')
1755 def dirstate(self):
1754 def dirstate(self):
1756 return self._makedirstate()
1755 return self._makedirstate()
1757
1756
1758 def _makedirstate(self):
1757 def _makedirstate(self):
1759 """Extension point for wrapping the dirstate per-repo."""
1758 """Extension point for wrapping the dirstate per-repo."""
1760 sparsematchfn = None
1759 sparsematchfn = None
1761 if sparse.use_sparse(self):
1760 if sparse.use_sparse(self):
1762 sparsematchfn = lambda: sparse.matcher(self)
1761 sparsematchfn = lambda: sparse.matcher(self)
1763 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1762 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1764 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1763 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1765 use_dirstate_v2 = v2_req in self.requirements
1764 use_dirstate_v2 = v2_req in self.requirements
1766 use_tracked_hint = th in self.requirements
1765 use_tracked_hint = th in self.requirements
1767
1766
1768 return dirstate.dirstate(
1767 return dirstate.dirstate(
1769 self.vfs,
1768 self.vfs,
1770 self.ui,
1769 self.ui,
1771 self.root,
1770 self.root,
1772 self._dirstatevalidate,
1771 self._dirstatevalidate,
1773 sparsematchfn,
1772 sparsematchfn,
1774 self.nodeconstants,
1773 self.nodeconstants,
1775 use_dirstate_v2,
1774 use_dirstate_v2,
1776 use_tracked_hint=use_tracked_hint,
1775 use_tracked_hint=use_tracked_hint,
1777 )
1776 )
1778
1777
1779 def _dirstatevalidate(self, node):
1778 def _dirstatevalidate(self, node):
1780 try:
1779 try:
1781 self.changelog.rev(node)
1780 self.changelog.rev(node)
1782 return node
1781 return node
1783 except error.LookupError:
1782 except error.LookupError:
1784 if not self._dirstatevalidatewarned:
1783 if not self._dirstatevalidatewarned:
1785 self._dirstatevalidatewarned = True
1784 self._dirstatevalidatewarned = True
1786 self.ui.warn(
1785 self.ui.warn(
1787 _(b"warning: ignoring unknown working parent %s!\n")
1786 _(b"warning: ignoring unknown working parent %s!\n")
1788 % short(node)
1787 % short(node)
1789 )
1788 )
1790 return self.nullid
1789 return self.nullid
1791
1790
1792 @storecache(narrowspec.FILENAME)
1791 @storecache(narrowspec.FILENAME)
1793 def narrowpats(self):
1792 def narrowpats(self):
1794 """matcher patterns for this repository's narrowspec
1793 """matcher patterns for this repository's narrowspec
1795
1794
1796 A tuple of (includes, excludes).
1795 A tuple of (includes, excludes).
1797 """
1796 """
1798 return narrowspec.load(self)
1797 return narrowspec.load(self)
1799
1798
1800 @storecache(narrowspec.FILENAME)
1799 @storecache(narrowspec.FILENAME)
1801 def _storenarrowmatch(self):
1800 def _storenarrowmatch(self):
1802 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1801 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1803 return matchmod.always()
1802 return matchmod.always()
1804 include, exclude = self.narrowpats
1803 include, exclude = self.narrowpats
1805 return narrowspec.match(self.root, include=include, exclude=exclude)
1804 return narrowspec.match(self.root, include=include, exclude=exclude)
1806
1805
1807 @storecache(narrowspec.FILENAME)
1806 @storecache(narrowspec.FILENAME)
1808 def _narrowmatch(self):
1807 def _narrowmatch(self):
1809 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1808 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1810 return matchmod.always()
1809 return matchmod.always()
1811 narrowspec.checkworkingcopynarrowspec(self)
1810 narrowspec.checkworkingcopynarrowspec(self)
1812 include, exclude = self.narrowpats
1811 include, exclude = self.narrowpats
1813 return narrowspec.match(self.root, include=include, exclude=exclude)
1812 return narrowspec.match(self.root, include=include, exclude=exclude)
1814
1813
1815 def narrowmatch(self, match=None, includeexact=False):
1814 def narrowmatch(self, match=None, includeexact=False):
1816 """matcher corresponding the the repo's narrowspec
1815 """matcher corresponding the the repo's narrowspec
1817
1816
1818 If `match` is given, then that will be intersected with the narrow
1817 If `match` is given, then that will be intersected with the narrow
1819 matcher.
1818 matcher.
1820
1819
1821 If `includeexact` is True, then any exact matches from `match` will
1820 If `includeexact` is True, then any exact matches from `match` will
1822 be included even if they're outside the narrowspec.
1821 be included even if they're outside the narrowspec.
1823 """
1822 """
1824 if match:
1823 if match:
1825 if includeexact and not self._narrowmatch.always():
1824 if includeexact and not self._narrowmatch.always():
1826 # do not exclude explicitly-specified paths so that they can
1825 # do not exclude explicitly-specified paths so that they can
1827 # be warned later on
1826 # be warned later on
1828 em = matchmod.exact(match.files())
1827 em = matchmod.exact(match.files())
1829 nm = matchmod.unionmatcher([self._narrowmatch, em])
1828 nm = matchmod.unionmatcher([self._narrowmatch, em])
1830 return matchmod.intersectmatchers(match, nm)
1829 return matchmod.intersectmatchers(match, nm)
1831 return matchmod.intersectmatchers(match, self._narrowmatch)
1830 return matchmod.intersectmatchers(match, self._narrowmatch)
1832 return self._narrowmatch
1831 return self._narrowmatch
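
    # A minimal usage sketch (path hypothetical): keep an explicitly listed
    # file visible even though it falls outside the narrowspec:
    #
    #   m = matchmod.exact([b'outside/narrow.txt'])
    #   nm = repo.narrowmatch(m, includeexact=True)
    #   nm(b'outside/narrow.txt')  # -> True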

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick
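
    # Sketch of the resulting mapping (revision numbers hypothetical): each
    # cached changeset is keyed by both its rev and its node, plus b'.' for
    # the first parent of the working copy:
    #
    #   {
    #       b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #       42: (42, node42), node42: (42, node42),  # wc parent
    #       41: (41, node41), node41: (41, node41),  # its parent
    #       b'.': (42, node42),
    #   }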

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
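
    # Keys accepted by __getitem__, per the branches above (sketch):
    #
    #   repo[None]     -> workingctx
    #   repo[5]        -> changectx for revision 5
    #   repo[b'.']     -> first working-copy parent
    #   repo[b'tip']   -> repository tip
    #   repo[node]     -> binary node (nodelen bytes)
    #   repo[hexnode]  -> hex node (2 * nodelen characters)
    #   repo[0:2]      -> list of changectx, filtered revisions skipped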

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
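
    # A minimal usage sketch (revset expressions hypothetical):
    #
    #   revs = repo.revs(b'heads(%ld)', [0, 1, 2])
    #   revs = repo.revs(b'ancestors(%d) and keyword(%s)', 42, b'fix')
    #
    # ``%d`` escapes an integer revision, ``%s`` a bytes string and ``%ld``
    # a list of integers; see ``revsetlang.formatspec`` for the full set.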

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
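
    # A minimal usage sketch (alias name hypothetical): resolve a spec with
    # user aliases enabled, overriding `mine` locally:
    #
    #   revs = repo.anyrevs(
    #       [b'mine'],
    #       user=True,
    #       localalias={b'mine': b'author("alice")'},
    #   )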

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
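
    # Sketch of the returned list (node values hypothetical): an entry is
    # True only when the node exists and is not filtered, e.g.
    #
    #   repo.known([present_node, missing_node])  # -> [True, False]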

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
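
    # Illustrative hgrc snippet (patterns and commands hypothetical): each
    # entry of the [encode] / [decode] sections becomes one (matcher, fn,
    # params) triple above, and ``!`` drops a pattern inherited from another
    # config file:
    #
    #   [encode]
    #   **.txt = dos2unix
    #   **.jpg = !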

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
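
    # Flag handling sketch (paths hypothetical): ``flags`` marks symlinks
    # with b'l' and the executable bit with b'x':
    #
    #   repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')  # executable file
    #   repo.wwrite(b'alias', b'target', b'l')        # symlink to 'target'
    #   repo.wwrite(b'doc.txt', b'data', b'')         # plain file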

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                if repo.currentwlock() is not None:
                    repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            repo = reporef()
2623 repo = reporef()
2625 assert repo is not None # help pytype
2624 assert repo is not None # help pytype
2626 repo.hook(
2625 repo.hook(
2627 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2626 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2628 )
2627 )
2629
2628
2630 tr.addabort(b'txnabort-hook', txnaborthook)
2629 tr.addabort(b'txnabort-hook', txnaborthook)
2631 # avoid eager cache invalidation. in-memory data should be identical
2630 # avoid eager cache invalidation. in-memory data should be identical
2632 # to stored data if the transaction has no error.
2631 # to stored data if the transaction has no error.
2633 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2632 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2634 self._transref = weakref.ref(tr)
2633 self._transref = weakref.ref(tr)
2635 scmutil.registersummarycallback(self, tr, desc)
2634 scmutil.registersummarycallback(self, tr, desc)
2636 return tr
2635 return tr
2637
2636
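# A minimal sketch of wiring a txnclose hook from an hgrc file: the
# hookargs set above surface to the hook as HG_* environment variables
# (e.g. HG_TXNID, HG_TXNNAME); the script path is a hypothetical
# placeholder, not part of the original file:
#
#     [hooks]
#     txnclose.audit = /path/to/audit-script "$HG_TXNNAME" "$HG_TXNID"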
2638 def _journalfiles(self):
2637 def _journalfiles(self):
2639 first = (
2638 first = (
2640 (self.svfs, b'journal'),
2639 (self.svfs, b'journal'),
2641 (self.svfs, b'journal.narrowspec'),
2640 (self.svfs, b'journal.narrowspec'),
2642 (self.vfs, b'journal.narrowspec.dirstate'),
2641 (self.vfs, b'journal.narrowspec.dirstate'),
2643 (self.vfs, b'journal.dirstate'),
2642 (self.vfs, b'journal.dirstate'),
2644 )
2643 )
2645 middle = []
2644 middle = []
2646 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2645 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2647 if dirstate_data is not None:
2646 if dirstate_data is not None:
2648 middle.append((self.vfs, dirstate_data))
2647 middle.append((self.vfs, dirstate_data))
2649 end = (
2648 end = (
2650 (self.vfs, b'journal.branch'),
2649 (self.vfs, b'journal.branch'),
2651 (self.vfs, b'journal.desc'),
2650 (self.vfs, b'journal.desc'),
2652 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2651 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2653 (self.svfs, b'journal.phaseroots'),
2652 (self.svfs, b'journal.phaseroots'),
2654 )
2653 )
2655 return first + tuple(middle) + end
2654 return first + tuple(middle) + end
2656
2655
2657 def undofiles(self):
2656 def undofiles(self):
2658 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2657 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2659
2658
2660 @unfilteredmethod
2659 @unfilteredmethod
2661 def _writejournal(self, desc):
2660 def _writejournal(self, desc):
2662 if self.currentwlock() is not None:
2661 if self.currentwlock() is not None:
2663 self.dirstate.savebackup(None, b'journal.dirstate')
2662 self.dirstate.savebackup(None, b'journal.dirstate')
2664 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2663 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2665 narrowspec.savebackup(self, b'journal.narrowspec')
2664 narrowspec.savebackup(self, b'journal.narrowspec')
2666 self.vfs.write(
2665 self.vfs.write(
2667 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2666 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2668 )
2667 )
2669 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2668 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2670 bookmarksvfs = bookmarks.bookmarksvfs(self)
2669 bookmarksvfs = bookmarks.bookmarksvfs(self)
2671 bookmarksvfs.write(
2670 bookmarksvfs.write(
2672 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2671 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2673 )
2672 )
2674 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2673 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2675
2674
2676 def recover(self):
2675 def recover(self):
2677 with self.lock():
2676 with self.lock():
2678 if self.svfs.exists(b"journal"):
2677 if self.svfs.exists(b"journal"):
2679 self.ui.status(_(b"rolling back interrupted transaction\n"))
2678 self.ui.status(_(b"rolling back interrupted transaction\n"))
2680 vfsmap = {
2679 vfsmap = {
2681 b'': self.svfs,
2680 b'': self.svfs,
2682 b'plain': self.vfs,
2681 b'plain': self.vfs,
2683 }
2682 }
2684 transaction.rollback(
2683 transaction.rollback(
2685 self.svfs,
2684 self.svfs,
2686 vfsmap,
2685 vfsmap,
2687 b"journal",
2686 b"journal",
2688 self.ui.warn,
2687 self.ui.warn,
2689 checkambigfiles=_cachedfiles,
2688 checkambigfiles=_cachedfiles,
2690 )
2689 )
2691 self.invalidate()
2690 self.invalidate()
2692 return True
2691 return True
2693 else:
2692 else:
2694 self.ui.warn(_(b"no interrupted transaction available\n"))
2693 self.ui.warn(_(b"no interrupted transaction available\n"))
2695 return False
2694 return False
2696
2695
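# A minimal usage sketch for recover() above (assuming `repo` is a
# localrepo instance); `hg recover` maps onto this method:
#
#     recovered = repo.recover()   # True iff an interrupted journal
#                                  # was rolled back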
2697 def rollback(self, dryrun=False, force=False):
2696 def rollback(self, dryrun=False, force=False):
2698 wlock = lock = dsguard = None
2697 wlock = lock = None
2699 try:
2698 try:
2700 wlock = self.wlock()
2699 wlock = self.wlock()
2701 lock = self.lock()
2700 lock = self.lock()
2702 if self.svfs.exists(b"undo"):
2701 if self.svfs.exists(b"undo"):
2703 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2702 return self._rollback(dryrun, force)
2704
2705 return self._rollback(dryrun, force, dsguard)
2706 else:
2703 else:
2707 self.ui.warn(_(b"no rollback information available\n"))
2704 self.ui.warn(_(b"no rollback information available\n"))
2708 return 1
2705 return 1
2709 finally:
2706 finally:
2710 release(dsguard, lock, wlock)
2707 release(lock, wlock)
2711
2708
2712 @unfilteredmethod # Until we get smarter cache management
2709 @unfilteredmethod # Until we get smarter cache management
2713 def _rollback(self, dryrun, force, dsguard):
2710 def _rollback(self, dryrun, force):
2714 ui = self.ui
2711 ui = self.ui
2715
2712
2716 parents = self.dirstate.parents()
2713 parents = self.dirstate.parents()
2717 try:
2714 try:
2718 args = self.vfs.read(b'undo.desc').splitlines()
2715 args = self.vfs.read(b'undo.desc').splitlines()
2719 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2716 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2720 if len(args) >= 3:
2717 if len(args) >= 3:
2721 detail = args[2]
2718 detail = args[2]
2722 oldtip = oldlen - 1
2719 oldtip = oldlen - 1
2723
2720
2724 if detail and ui.verbose:
2721 if detail and ui.verbose:
2725 msg = _(
2722 msg = _(
2726 b'repository tip rolled back to revision %d'
2723 b'repository tip rolled back to revision %d'
2727 b' (undo %s: %s)\n'
2724 b' (undo %s: %s)\n'
2728 ) % (oldtip, desc, detail)
2725 ) % (oldtip, desc, detail)
2729 else:
2726 else:
2730 msg = _(
2727 msg = _(
2731 b'repository tip rolled back to revision %d (undo %s)\n'
2728 b'repository tip rolled back to revision %d (undo %s)\n'
2732 ) % (oldtip, desc)
2729 ) % (oldtip, desc)
2733 parentgone = any(self[p].rev() > oldtip for p in parents)
2730 parentgone = any(self[p].rev() > oldtip for p in parents)
2734 except IOError:
2731 except IOError:
2735 msg = _(b'rolling back unknown transaction\n')
2732 msg = _(b'rolling back unknown transaction\n')
2736 desc = None
2733 desc = None
2737 parentgone = True
2734 parentgone = True
2738
2735
2739 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2736 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2740 raise error.Abort(
2737 raise error.Abort(
2741 _(
2738 _(
2742 b'rollback of last commit while not checked out '
2739 b'rollback of last commit while not checked out '
2743 b'may lose data'
2740 b'may lose data'
2744 ),
2741 ),
2745 hint=_(b'use -f to force'),
2742 hint=_(b'use -f to force'),
2746 )
2743 )
2747
2744
2748 ui.status(msg)
2745 ui.status(msg)
2749 if dryrun:
2746 if dryrun:
2750 return 0
2747 return 0
2751
2748
2752 self.destroying()
2749 self.destroying()
2753 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2750 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2754 skip_journal_pattern = None
2751 skip_journal_pattern = None
2755 if not parentgone:
2752 if not parentgone:
2756 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2753 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2757 transaction.rollback(
2754 transaction.rollback(
2758 self.svfs,
2755 self.svfs,
2759 vfsmap,
2756 vfsmap,
2760 b'undo',
2757 b'undo',
2761 ui.warn,
2758 ui.warn,
2762 checkambigfiles=_cachedfiles,
2759 checkambigfiles=_cachedfiles,
2763 skip_journal_pattern=skip_journal_pattern,
2760 skip_journal_pattern=skip_journal_pattern,
2764 )
2761 )
2765 bookmarksvfs = bookmarks.bookmarksvfs(self)
2762 bookmarksvfs = bookmarks.bookmarksvfs(self)
2766 if bookmarksvfs.exists(b'undo.bookmarks'):
2763 if bookmarksvfs.exists(b'undo.bookmarks'):
2767 bookmarksvfs.rename(
2764 bookmarksvfs.rename(
2768 b'undo.bookmarks', b'bookmarks', checkambig=True
2765 b'undo.bookmarks', b'bookmarks', checkambig=True
2769 )
2766 )
2770 if self.svfs.exists(b'undo.phaseroots'):
2767 if self.svfs.exists(b'undo.phaseroots'):
2771 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2768 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2772 self.invalidate()
2769 self.invalidate()
2773
2770
2774 if parentgone:
2771 if parentgone:
2775 # prevent the dirstateguard from overwriting the already restored one
2776 dsguard.close()
2777
2778 narrowspec.restorebackup(self, b'undo.narrowspec')
2772 narrowspec.restorebackup(self, b'undo.narrowspec')
2779 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2773 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2780 self.dirstate.restorebackup(None, b'undo.dirstate')
2774 self.dirstate.restorebackup(None, b'undo.dirstate')
2781 try:
2775 try:
2782 branch = self.vfs.read(b'undo.branch')
2776 branch = self.vfs.read(b'undo.branch')
2783 self.dirstate.setbranch(encoding.tolocal(branch))
2777 self.dirstate.setbranch(encoding.tolocal(branch))
2784 except IOError:
2778 except IOError:
2785 ui.warn(
2779 ui.warn(
2786 _(
2780 _(
2787 b'named branch could not be reset: '
2781 b'named branch could not be reset: '
2788 b'current branch is still \'%s\'\n'
2782 b'current branch is still \'%s\'\n'
2789 )
2783 )
2790 % self.dirstate.branch()
2784 % self.dirstate.branch()
2791 )
2785 )
2792
2786
2793 parents = tuple([p.rev() for p in self[None].parents()])
2787 parents = tuple([p.rev() for p in self[None].parents()])
2794 if len(parents) > 1:
2788 if len(parents) > 1:
2795 ui.status(
2789 ui.status(
2796 _(
2790 _(
2797 b'working directory now based on '
2791 b'working directory now based on '
2798 b'revisions %d and %d\n'
2792 b'revisions %d and %d\n'
2799 )
2793 )
2800 % parents
2794 % parents
2801 )
2795 )
2802 else:
2796 else:
2803 ui.status(
2797 ui.status(
2804 _(b'working directory now based on revision %d\n') % parents
2798 _(b'working directory now based on revision %d\n') % parents
2805 )
2799 )
2806 mergestatemod.mergestate.clean(self)
2800 mergestatemod.mergestate.clean(self)
2807
2801
2808 # TODO: if we know which new heads may result from this rollback, pass
2802 # TODO: if we know which new heads may result from this rollback, pass
2809 # them to destroy(), which will prevent the branchhead cache from being
2803 # them to destroy(), which will prevent the branchhead cache from being
2810 # invalidated.
2804 # invalidated.
2811 self.destroyed()
2805 self.destroyed()
2812 return 0
2806 return 0
2813
2807
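# For reference, the undo.desc file parsed in _rollback() above is the
# renamed journal.desc written by _writejournal() below; a sketch of its
# layout and parsing, mirroring the code:
#
#     data = repo.vfs.read(b'undo.desc')    # e.g. b"42\ncommit\n"
#     args = data.splitlines()              # [b'42', b'commit', ...]
#     oldtip = int(args[0]) - 1             # tip revision before the txn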
2814 def _buildcacheupdater(self, newtransaction):
2808 def _buildcacheupdater(self, newtransaction):
2815 """called during transaction to build the callback updating cache
2809 """called during transaction to build the callback updating cache
2816
2810
2817 Lives on the repository to help extensions that might want to augment
2811 Lives on the repository to help extensions that might want to augment
2818 this logic. For this purpose, the created transaction is passed to the
2812 this logic. For this purpose, the created transaction is passed to the
2819 method.
2813 method.
2820 """
2814 """
2821 # we must avoid cyclic reference between repo and transaction.
2815 # we must avoid cyclic reference between repo and transaction.
2822 reporef = weakref.ref(self)
2816 reporef = weakref.ref(self)
2823
2817
2824 def updater(tr):
2818 def updater(tr):
2825 repo = reporef()
2819 repo = reporef()
2826 assert repo is not None # help pytype
2820 assert repo is not None # help pytype
2827 repo.updatecaches(tr)
2821 repo.updatecaches(tr)
2828
2822
2829 return updater
2823 return updater
2830
2824
2831 @unfilteredmethod
2825 @unfilteredmethod
2832 def updatecaches(self, tr=None, full=False, caches=None):
2826 def updatecaches(self, tr=None, full=False, caches=None):
2833 """warm appropriate caches
2827 """warm appropriate caches
2834
2828
2835 If this function is called after a transaction closed, the transaction
2829 If this function is called after a transaction closed, the transaction
2836 will be available in the 'tr' argument. This can be used to selectively
2830 will be available in the 'tr' argument. This can be used to selectively
2837 update caches relevant to the changes in that transaction.
2831 update caches relevant to the changes in that transaction.
2838
2832
2839 If 'full' is set, make sure all caches the function knows about have
2833 If 'full' is set, make sure all caches the function knows about have
2840 up-to-date data, even the ones usually loaded more lazily.
2834 up-to-date data, even the ones usually loaded more lazily.
2841
2835
2842 The `full` argument can take a special "post-clone" value. In this case
2836 The `full` argument can take a special "post-clone" value. In this case
2843 the cache warming is made after a clone and some of the slower caches might
2837 the cache warming is made after a clone and some of the slower caches might
2844 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2838 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2845 as we plan for a cleaner way to deal with this for 5.9.
2839 as we plan for a cleaner way to deal with this for 5.9.
2846 """
2840 """
2847 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2841 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2848 # During strip, many caches are invalid but
2842 # During strip, many caches are invalid but
2849 # a later call to `destroyed` will refresh them.
2843 # a later call to `destroyed` will refresh them.
2850 return
2844 return
2851
2845
2852 unfi = self.unfiltered()
2846 unfi = self.unfiltered()
2853
2847
2854 if full:
2848 if full:
2855 msg = (
2849 msg = (
2856 "`full` argument for `repo.updatecaches` is deprecated\n"
2850 "`full` argument for `repo.updatecaches` is deprecated\n"
2857 "(use `caches=repository.CACHE_ALL` instead)"
2851 "(use `caches=repository.CACHE_ALL` instead)"
2858 )
2852 )
2859 self.ui.deprecwarn(msg, b"5.9")
2853 self.ui.deprecwarn(msg, b"5.9")
2860 caches = repository.CACHES_ALL
2854 caches = repository.CACHES_ALL
2861 if full == b"post-clone":
2855 if full == b"post-clone":
2862 caches = repository.CACHES_POST_CLONE
2856 caches = repository.CACHES_POST_CLONE
2863 caches = repository.CACHES_ALL
2857 caches = repository.CACHES_ALL
2864 elif caches is None:
2858 elif caches is None:
2865 caches = repository.CACHES_DEFAULT
2859 caches = repository.CACHES_DEFAULT
2866
2860
2867 if repository.CACHE_BRANCHMAP_SERVED in caches:
2861 if repository.CACHE_BRANCHMAP_SERVED in caches:
2868 if tr is None or tr.changes[b'origrepolen'] < len(self):
2862 if tr is None or tr.changes[b'origrepolen'] < len(self):
2869 # accessing the 'served' branchmap should refresh all the others,
2863 # accessing the 'served' branchmap should refresh all the others,
2870 self.ui.debug(b'updating the branch cache\n')
2864 self.ui.debug(b'updating the branch cache\n')
2871 self.filtered(b'served').branchmap()
2865 self.filtered(b'served').branchmap()
2872 self.filtered(b'served.hidden').branchmap()
2866 self.filtered(b'served.hidden').branchmap()
2873 # flush all possibly delayed write.
2867 # flush all possibly delayed write.
2874 self._branchcaches.write_delayed(self)
2868 self._branchcaches.write_delayed(self)
2875
2869
2876 if repository.CACHE_CHANGELOG_CACHE in caches:
2870 if repository.CACHE_CHANGELOG_CACHE in caches:
2877 self.changelog.update_caches(transaction=tr)
2871 self.changelog.update_caches(transaction=tr)
2878
2872
2879 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2873 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2880 self.manifestlog.update_caches(transaction=tr)
2874 self.manifestlog.update_caches(transaction=tr)
2881
2875
2882 if repository.CACHE_REV_BRANCH in caches:
2876 if repository.CACHE_REV_BRANCH in caches:
2883 rbc = unfi.revbranchcache()
2877 rbc = unfi.revbranchcache()
2884 for r in unfi.changelog:
2878 for r in unfi.changelog:
2885 rbc.branchinfo(r)
2879 rbc.branchinfo(r)
2886 rbc.write()
2880 rbc.write()
2887
2881
2888 if repository.CACHE_FULL_MANIFEST in caches:
2882 if repository.CACHE_FULL_MANIFEST in caches:
2889 # ensure the working copy parents are in the manifestfulltextcache
2883 # ensure the working copy parents are in the manifestfulltextcache
2890 for ctx in self[b'.'].parents():
2884 for ctx in self[b'.'].parents():
2891 ctx.manifest() # accessing the manifest is enough
2885 ctx.manifest() # accessing the manifest is enough
2892
2886
2893 if repository.CACHE_FILE_NODE_TAGS in caches:
2887 if repository.CACHE_FILE_NODE_TAGS in caches:
2894 # accessing fnode cache warms the cache
2888 # accessing fnode cache warms the cache
2895 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2889 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2896
2890
2897 if repository.CACHE_TAGS_DEFAULT in caches:
2891 if repository.CACHE_TAGS_DEFAULT in caches:
2898 # accessing tags warms the cache
2892 # accessing tags warms the cache
2899 self.tags()
2893 self.tags()
2900 if repository.CACHE_TAGS_SERVED in caches:
2894 if repository.CACHE_TAGS_SERVED in caches:
2901 self.filtered(b'served').tags()
2895 self.filtered(b'served').tags()
2902
2896
2903 if repository.CACHE_BRANCHMAP_ALL in caches:
2897 if repository.CACHE_BRANCHMAP_ALL in caches:
2904 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2898 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2905 # so we're forcing a write to cause these caches to be warmed up
2899 # so we're forcing a write to cause these caches to be warmed up
2906 # even if they haven't explicitly been requested yet (if they've
2900 # even if they haven't explicitly been requested yet (if they've
2907 # never been used by hg, they won't ever have been written, even if
2901 # never been used by hg, they won't ever have been written, even if
2908 # they're a subset of another kind of cache that *has* been used).
2902 # they're a subset of another kind of cache that *has* been used).
2909 for filt in repoview.filtertable.keys():
2903 for filt in repoview.filtertable.keys():
2910 filtered = self.filtered(filt)
2904 filtered = self.filtered(filt)
2911 filtered.branchmap().write(filtered)
2905 filtered.branchmap().write(filtered)
2912
2906
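# A hedged sketch of calling updatecaches() with an explicit cache set,
# as the deprecation message above recommends over `full=True`:
#
#     from mercurial.interfaces import repository
#     repo.updatecaches(caches=repository.CACHES_ALL)   # warm everything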
2913 def invalidatecaches(self):
2907 def invalidatecaches(self):
2914 if '_tagscache' in vars(self):
2908 if '_tagscache' in vars(self):
2915 # can't use delattr on proxy
2909 # can't use delattr on proxy
2916 del self.__dict__['_tagscache']
2910 del self.__dict__['_tagscache']
2917
2911
2918 self._branchcaches.clear()
2912 self._branchcaches.clear()
2919 self.invalidatevolatilesets()
2913 self.invalidatevolatilesets()
2920 self._sparsesignaturecache.clear()
2914 self._sparsesignaturecache.clear()
2921
2915
2922 def invalidatevolatilesets(self):
2916 def invalidatevolatilesets(self):
2923 self.filteredrevcache.clear()
2917 self.filteredrevcache.clear()
2924 obsolete.clearobscaches(self)
2918 obsolete.clearobscaches(self)
2925 self._quick_access_changeid_invalidate()
2919 self._quick_access_changeid_invalidate()
2926
2920
2927 def invalidatedirstate(self):
2921 def invalidatedirstate(self):
2928 """Invalidates the dirstate, causing the next call to dirstate
2922 """Invalidates the dirstate, causing the next call to dirstate
2929 to check if it was modified since the last time it was read,
2923 to check if it was modified since the last time it was read,
2930 rereading it if it has.
2924 rereading it if it has.
2931
2925
2932 This is different from dirstate.invalidate() in that it doesn't always
2926 This is different from dirstate.invalidate() in that it doesn't always
2933 reread the dirstate. Use dirstate.invalidate() if you want to
2927 reread the dirstate. Use dirstate.invalidate() if you want to
2934 explicitly read the dirstate again (i.e. restoring it to a previous
2928 explicitly read the dirstate again (i.e. restoring it to a previous
2935 known good state)."""
2929 known good state)."""
2936 if hasunfilteredcache(self, 'dirstate'):
2930 if hasunfilteredcache(self, 'dirstate'):
2937 for k in self.dirstate._filecache:
2931 for k in self.dirstate._filecache:
2938 try:
2932 try:
2939 delattr(self.dirstate, k)
2933 delattr(self.dirstate, k)
2940 except AttributeError:
2934 except AttributeError:
2941 pass
2935 pass
2942 delattr(self.unfiltered(), 'dirstate')
2936 delattr(self.unfiltered(), 'dirstate')
2943
2937
2944 def invalidate(self, clearfilecache=False):
2938 def invalidate(self, clearfilecache=False):
2945 """Invalidates both store and non-store parts other than dirstate
2939 """Invalidates both store and non-store parts other than dirstate
2946
2940
2947 If a transaction is running, invalidation of store is omitted,
2941 If a transaction is running, invalidation of store is omitted,
2948 because discarding in-memory changes might cause inconsistency
2942 because discarding in-memory changes might cause inconsistency
2949 (e.g. an incomplete fncache causes unintentional failure, but
2943 (e.g. an incomplete fncache causes unintentional failure, but
2950 a redundant one doesn't).
2944 a redundant one doesn't).
2951 """
2945 """
2952 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2946 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2953 for k in list(self._filecache.keys()):
2947 for k in list(self._filecache.keys()):
2954 # dirstate is invalidated separately in invalidatedirstate()
2948 # dirstate is invalidated separately in invalidatedirstate()
2955 if k == b'dirstate':
2949 if k == b'dirstate':
2956 continue
2950 continue
2957 if (
2951 if (
2958 k == b'changelog'
2952 k == b'changelog'
2959 and self.currenttransaction()
2953 and self.currenttransaction()
2960 and self.changelog._delayed
2954 and self.changelog._delayed
2961 ):
2955 ):
2962 # The changelog object may store unwritten revisions. We don't
2956 # The changelog object may store unwritten revisions. We don't
2963 # want to lose them.
2957 # want to lose them.
2964 # TODO: Solve the problem instead of working around it.
2958 # TODO: Solve the problem instead of working around it.
2965 continue
2959 continue
2966
2960
2967 if clearfilecache:
2961 if clearfilecache:
2968 del self._filecache[k]
2962 del self._filecache[k]
2969 try:
2963 try:
2970 delattr(unfiltered, k)
2964 delattr(unfiltered, k)
2971 except AttributeError:
2965 except AttributeError:
2972 pass
2966 pass
2973 self.invalidatecaches()
2967 self.invalidatecaches()
2974 if not self.currenttransaction():
2968 if not self.currenttransaction():
2975 # TODO: Changing contents of store outside transaction
2969 # TODO: Changing contents of store outside transaction
2976 # causes inconsistency. We should make in-memory store
2970 # causes inconsistency. We should make in-memory store
2977 # changes detectable, and abort if changed.
2971 # changes detectable, and abort if changed.
2978 self.store.invalidatecaches()
2972 self.store.invalidatecaches()
2979
2973
2980 def invalidateall(self):
2974 def invalidateall(self):
2981 """Fully invalidates both store and non-store parts, causing the
2975 """Fully invalidates both store and non-store parts, causing the
2982 subsequent operation to reread any outside changes."""
2976 subsequent operation to reread any outside changes."""
2983 # extension should hook this to invalidate its caches
2977 # extension should hook this to invalidate its caches
2984 self.invalidate()
2978 self.invalidate()
2985 self.invalidatedirstate()
2979 self.invalidatedirstate()
2986
2980
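# A sketch of the division of labor between the invalidation helpers
# above (assuming `repo` is a localrepo):
#
#     repo.invalidate()          # store-side file caches, not dirstate
#     repo.invalidatedirstate()  # force the dirstate to be re-checked
#     repo.invalidateall()       # both, after outside changes to .hg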
2987 @unfilteredmethod
2981 @unfilteredmethod
2988 def _refreshfilecachestats(self, tr):
2982 def _refreshfilecachestats(self, tr):
2989 """Reload stats of cached files so that they are flagged as valid"""
2983 """Reload stats of cached files so that they are flagged as valid"""
2990 for k, ce in self._filecache.items():
2984 for k, ce in self._filecache.items():
2991 k = pycompat.sysstr(k)
2985 k = pycompat.sysstr(k)
2992 if k == 'dirstate' or k not in self.__dict__:
2986 if k == 'dirstate' or k not in self.__dict__:
2993 continue
2987 continue
2994 ce.refresh()
2988 ce.refresh()
2995
2989
2996 def _lock(
2990 def _lock(
2997 self,
2991 self,
2998 vfs,
2992 vfs,
2999 lockname,
2993 lockname,
3000 wait,
2994 wait,
3001 releasefn,
2995 releasefn,
3002 acquirefn,
2996 acquirefn,
3003 desc,
2997 desc,
3004 ):
2998 ):
3005 timeout = 0
2999 timeout = 0
3006 warntimeout = 0
3000 warntimeout = 0
3007 if wait:
3001 if wait:
3008 timeout = self.ui.configint(b"ui", b"timeout")
3002 timeout = self.ui.configint(b"ui", b"timeout")
3009 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3003 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3010 # internal config: ui.signal-safe-lock
3004 # internal config: ui.signal-safe-lock
3011 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3005 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3012
3006
3013 l = lockmod.trylock(
3007 l = lockmod.trylock(
3014 self.ui,
3008 self.ui,
3015 vfs,
3009 vfs,
3016 lockname,
3010 lockname,
3017 timeout,
3011 timeout,
3018 warntimeout,
3012 warntimeout,
3019 releasefn=releasefn,
3013 releasefn=releasefn,
3020 acquirefn=acquirefn,
3014 acquirefn=acquirefn,
3021 desc=desc,
3015 desc=desc,
3022 signalsafe=signalsafe,
3016 signalsafe=signalsafe,
3023 )
3017 )
3024 return l
3018 return l
3025
3019
3026 def _afterlock(self, callback):
3020 def _afterlock(self, callback):
3027 """add a callback to be run when the repository is fully unlocked
3021 """add a callback to be run when the repository is fully unlocked
3028
3022
3029 The callback will be executed when the outermost lock is released
3023 The callback will be executed when the outermost lock is released
3030 (with wlock being higher level than 'lock')."""
3024 (with wlock being higher level than 'lock')."""
3031 for ref in (self._wlockref, self._lockref):
3025 for ref in (self._wlockref, self._lockref):
3032 l = ref and ref()
3026 l = ref and ref()
3033 if l and l.held:
3027 if l and l.held:
3034 l.postrelease.append(callback)
3028 l.postrelease.append(callback)
3035 break
3029 break
3036 else: # no lock has been found.
3030 else: # no lock has been found.
3037 callback(True)
3031 callback(True)
3038
3032
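# A minimal sketch of the callback contract for _afterlock() above: the
# callable receives one boolean, whether the locked operation succeeded
# (compare `hookfunc` and `commithook` in this file):
#
#     def notify(success):
#         if success:
#             repo.ui.status(b'all locks released\n')
#
#     repo._afterlock(notify)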
3039 def lock(self, wait=True):
3033 def lock(self, wait=True):
3040 """Lock the repository store (.hg/store) and return a weak reference
3034 """Lock the repository store (.hg/store) and return a weak reference
3041 to the lock. Use this before modifying the store (e.g. committing or
3035 to the lock. Use this before modifying the store (e.g. committing or
3042 stripping). If you are opening a transaction, get a lock as well.
3036 stripping). If you are opening a transaction, get a lock as well.
3043
3037
3044 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3038 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3045 'wlock' first to avoid a dead-lock hazard."""
3039 'wlock' first to avoid a dead-lock hazard."""
3046 l = self._currentlock(self._lockref)
3040 l = self._currentlock(self._lockref)
3047 if l is not None:
3041 if l is not None:
3048 l.lock()
3042 l.lock()
3049 return l
3043 return l
3050
3044
3051 l = self._lock(
3045 l = self._lock(
3052 vfs=self.svfs,
3046 vfs=self.svfs,
3053 lockname=b"lock",
3047 lockname=b"lock",
3054 wait=wait,
3048 wait=wait,
3055 releasefn=None,
3049 releasefn=None,
3056 acquirefn=self.invalidate,
3050 acquirefn=self.invalidate,
3057 desc=_(b'repository %s') % self.origroot,
3051 desc=_(b'repository %s') % self.origroot,
3058 )
3052 )
3059 self._lockref = weakref.ref(l)
3053 self._lockref = weakref.ref(l)
3060 return l
3054 return l
3061
3055
3062 def wlock(self, wait=True):
3056 def wlock(self, wait=True):
3063 """Lock the non-store parts of the repository (everything under
3057 """Lock the non-store parts of the repository (everything under
3064 .hg except .hg/store) and return a weak reference to the lock.
3058 .hg except .hg/store) and return a weak reference to the lock.
3065
3059
3066 Use this before modifying files in .hg.
3060 Use this before modifying files in .hg.
3067
3061
3068 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3062 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3069 'wlock' first to avoid a dead-lock hazard."""
3063 'wlock' first to avoid a dead-lock hazard."""
3070 l = self._wlockref() if self._wlockref else None
3064 l = self._wlockref() if self._wlockref else None
3071 if l is not None and l.held:
3065 if l is not None and l.held:
3072 l.lock()
3066 l.lock()
3073 return l
3067 return l
3074
3068
3075 # We do not need to check for non-waiting lock acquisition. Such
3069 # We do not need to check for non-waiting lock acquisition. Such
3076 # acquisition would not cause a dead-lock, as it would just fail.
3070 # acquisition would not cause a dead-lock, as it would just fail.
3077 if wait and (
3071 if wait and (
3078 self.ui.configbool(b'devel', b'all-warnings')
3072 self.ui.configbool(b'devel', b'all-warnings')
3079 or self.ui.configbool(b'devel', b'check-locks')
3073 or self.ui.configbool(b'devel', b'check-locks')
3080 ):
3074 ):
3081 if self._currentlock(self._lockref) is not None:
3075 if self._currentlock(self._lockref) is not None:
3082 self.ui.develwarn(b'"wlock" acquired after "lock"')
3076 self.ui.develwarn(b'"wlock" acquired after "lock"')
3083
3077
3084 def unlock():
3078 def unlock():
3085 if self.dirstate.is_changing_any:
3079 if self.dirstate.is_changing_any:
3086 msg = b"wlock release in the middle of a changing parents"
3080 msg = b"wlock release in the middle of a changing parents"
3087 self.ui.develwarn(msg)
3081 self.ui.develwarn(msg)
3088 self.dirstate.invalidate()
3082 self.dirstate.invalidate()
3089 else:
3083 else:
3090 if self.dirstate._dirty:
3084 if self.dirstate._dirty:
3091 msg = b"dirty dirstate on wlock release"
3085 msg = b"dirty dirstate on wlock release"
3092 self.ui.develwarn(msg)
3086 self.ui.develwarn(msg)
3093 self.dirstate.write(None)
3087 self.dirstate.write(None)
3094
3088
3095 self._filecache[b'dirstate'].refresh()
3089 self._filecache[b'dirstate'].refresh()
3096
3090
3097 l = self._lock(
3091 l = self._lock(
3098 self.vfs,
3092 self.vfs,
3099 b"wlock",
3093 b"wlock",
3100 wait,
3094 wait,
3101 unlock,
3095 unlock,
3102 self.invalidatedirstate,
3096 self.invalidatedirstate,
3103 _(b'working directory of %s') % self.origroot,
3097 _(b'working directory of %s') % self.origroot,
3104 )
3098 )
3105 self._wlockref = weakref.ref(l)
3099 self._wlockref = weakref.ref(l)
3106 return l
3100 return l
3107
3101
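# The lock-ordering rule from the two docstrings above, as a sketch:
# always take wlock before lock to avoid the dead-lock hazard, and take
# both before opening a transaction:
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             pass  # mutate the store and working copy here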
3108 def _currentlock(self, lockref):
3102 def _currentlock(self, lockref):
3109 """Returns the lock if it's held, or None if it's not."""
3103 """Returns the lock if it's held, or None if it's not."""
3110 if lockref is None:
3104 if lockref is None:
3111 return None
3105 return None
3112 l = lockref()
3106 l = lockref()
3113 if l is None or not l.held:
3107 if l is None or not l.held:
3114 return None
3108 return None
3115 return l
3109 return l
3116
3110
3117 def currentwlock(self):
3111 def currentwlock(self):
3118 """Returns the wlock if it's held, or None if it's not."""
3112 """Returns the wlock if it's held, or None if it's not."""
3119 return self._currentlock(self._wlockref)
3113 return self._currentlock(self._wlockref)
3120
3114
3121 def checkcommitpatterns(self, wctx, match, status, fail):
3115 def checkcommitpatterns(self, wctx, match, status, fail):
3122 """check for commit arguments that aren't committable"""
3116 """check for commit arguments that aren't committable"""
3123 if match.isexact() or match.prefix():
3117 if match.isexact() or match.prefix():
3124 matched = set(status.modified + status.added + status.removed)
3118 matched = set(status.modified + status.added + status.removed)
3125
3119
3126 for f in match.files():
3120 for f in match.files():
3127 f = self.dirstate.normalize(f)
3121 f = self.dirstate.normalize(f)
3128 if f == b'.' or f in matched or f in wctx.substate:
3122 if f == b'.' or f in matched or f in wctx.substate:
3129 continue
3123 continue
3130 if f in status.deleted:
3124 if f in status.deleted:
3131 fail(f, _(b'file not found!'))
3125 fail(f, _(b'file not found!'))
3132 # Is it a directory that exists or used to exist?
3126 # Is it a directory that exists or used to exist?
3133 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3127 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3134 d = f + b'/'
3128 d = f + b'/'
3135 for mf in matched:
3129 for mf in matched:
3136 if mf.startswith(d):
3130 if mf.startswith(d):
3137 break
3131 break
3138 else:
3132 else:
3139 fail(f, _(b"no match under directory!"))
3133 fail(f, _(b"no match under directory!"))
3140 elif f not in self.dirstate:
3134 elif f not in self.dirstate:
3141 fail(f, _(b"file not tracked!"))
3135 fail(f, _(b"file not tracked!"))
3142
3136
3143 @unfilteredmethod
3137 @unfilteredmethod
3144 def commit(
3138 def commit(
3145 self,
3139 self,
3146 text=b"",
3140 text=b"",
3147 user=None,
3141 user=None,
3148 date=None,
3142 date=None,
3149 match=None,
3143 match=None,
3150 force=False,
3144 force=False,
3151 editor=None,
3145 editor=None,
3152 extra=None,
3146 extra=None,
3153 ):
3147 ):
3154 """Add a new revision to current repository.
3148 """Add a new revision to current repository.
3155
3149
3156 Revision information is gathered from the working directory,
3150 Revision information is gathered from the working directory,
3157 match can be used to filter the committed files. If editor is
3151 match can be used to filter the committed files. If editor is
3158 supplied, it is called to get a commit message.
3152 supplied, it is called to get a commit message.
3159 """
3153 """
3160 if extra is None:
3154 if extra is None:
3161 extra = {}
3155 extra = {}
3162
3156
3163 def fail(f, msg):
3157 def fail(f, msg):
3164 raise error.InputError(b'%s: %s' % (f, msg))
3158 raise error.InputError(b'%s: %s' % (f, msg))
3165
3159
3166 if not match:
3160 if not match:
3167 match = matchmod.always()
3161 match = matchmod.always()
3168
3162
3169 if not force:
3163 if not force:
3170 match.bad = fail
3164 match.bad = fail
3171
3165
3172 # lock() for recent changelog (see issue4368)
3166 # lock() for recent changelog (see issue4368)
3173 with self.wlock(), self.lock():
3167 with self.wlock(), self.lock():
3174 wctx = self[None]
3168 wctx = self[None]
3175 merge = len(wctx.parents()) > 1
3169 merge = len(wctx.parents()) > 1
3176
3170
3177 if not force and merge and not match.always():
3171 if not force and merge and not match.always():
3178 raise error.Abort(
3172 raise error.Abort(
3179 _(
3173 _(
3180 b'cannot partially commit a merge '
3174 b'cannot partially commit a merge '
3181 b'(do not specify files or patterns)'
3175 b'(do not specify files or patterns)'
3182 )
3176 )
3183 )
3177 )
3184
3178
3185 status = self.status(match=match, clean=force)
3179 status = self.status(match=match, clean=force)
3186 if force:
3180 if force:
3187 status.modified.extend(
3181 status.modified.extend(
3188 status.clean
3182 status.clean
3189 ) # mq may commit clean files
3183 ) # mq may commit clean files
3190
3184
3191 # check subrepos
3185 # check subrepos
3192 subs, commitsubs, newstate = subrepoutil.precommit(
3186 subs, commitsubs, newstate = subrepoutil.precommit(
3193 self.ui, wctx, status, match, force=force
3187 self.ui, wctx, status, match, force=force
3194 )
3188 )
3195
3189
3196 # make sure all explicit patterns are matched
3190 # make sure all explicit patterns are matched
3197 if not force:
3191 if not force:
3198 self.checkcommitpatterns(wctx, match, status, fail)
3192 self.checkcommitpatterns(wctx, match, status, fail)
3199
3193
3200 cctx = context.workingcommitctx(
3194 cctx = context.workingcommitctx(
3201 self, status, text, user, date, extra
3195 self, status, text, user, date, extra
3202 )
3196 )
3203
3197
3204 ms = mergestatemod.mergestate.read(self)
3198 ms = mergestatemod.mergestate.read(self)
3205 mergeutil.checkunresolved(ms)
3199 mergeutil.checkunresolved(ms)
3206
3200
3207 # internal config: ui.allowemptycommit
3201 # internal config: ui.allowemptycommit
3208 if cctx.isempty() and not self.ui.configbool(
3202 if cctx.isempty() and not self.ui.configbool(
3209 b'ui', b'allowemptycommit'
3203 b'ui', b'allowemptycommit'
3210 ):
3204 ):
3211 self.ui.debug(b'nothing to commit, clearing merge state\n')
3205 self.ui.debug(b'nothing to commit, clearing merge state\n')
3212 ms.reset()
3206 ms.reset()
3213 return None
3207 return None
3214
3208
3215 if merge and cctx.deleted():
3209 if merge and cctx.deleted():
3216 raise error.Abort(_(b"cannot commit merge with missing files"))
3210 raise error.Abort(_(b"cannot commit merge with missing files"))
3217
3211
3218 if editor:
3212 if editor:
3219 cctx._text = editor(self, cctx, subs)
3213 cctx._text = editor(self, cctx, subs)
3220 edited = text != cctx._text
3214 edited = text != cctx._text
3221
3215
3222 # Save commit message in case this transaction gets rolled back
3216 # Save commit message in case this transaction gets rolled back
3223 # (e.g. by a pretxncommit hook). Leave the content alone on
3217 # (e.g. by a pretxncommit hook). Leave the content alone on
3224 # the assumption that the user will use the same editor again.
3218 # the assumption that the user will use the same editor again.
3225 msg_path = self.savecommitmessage(cctx._text)
3219 msg_path = self.savecommitmessage(cctx._text)
3226
3220
3227 # commit subs and write new state
3221 # commit subs and write new state
3228 if subs:
3222 if subs:
3229 uipathfn = scmutil.getuipathfn(self)
3223 uipathfn = scmutil.getuipathfn(self)
3230 for s in sorted(commitsubs):
3224 for s in sorted(commitsubs):
3231 sub = wctx.sub(s)
3225 sub = wctx.sub(s)
3232 self.ui.status(
3226 self.ui.status(
3233 _(b'committing subrepository %s\n')
3227 _(b'committing subrepository %s\n')
3234 % uipathfn(subrepoutil.subrelpath(sub))
3228 % uipathfn(subrepoutil.subrelpath(sub))
3235 )
3229 )
3236 sr = sub.commit(cctx._text, user, date)
3230 sr = sub.commit(cctx._text, user, date)
3237 newstate[s] = (newstate[s][0], sr)
3231 newstate[s] = (newstate[s][0], sr)
3238 subrepoutil.writestate(self, newstate)
3232 subrepoutil.writestate(self, newstate)
3239
3233
3240 p1, p2 = self.dirstate.parents()
3234 p1, p2 = self.dirstate.parents()
3241 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3235 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3242 try:
3236 try:
3243 self.hook(
3237 self.hook(
3244 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3238 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3245 )
3239 )
3246 with self.transaction(b'commit'):
3240 with self.transaction(b'commit'):
3247 ret = self.commitctx(cctx, True)
3241 ret = self.commitctx(cctx, True)
3248 # update bookmarks, dirstate and mergestate
3242 # update bookmarks, dirstate and mergestate
3249 bookmarks.update(self, [p1, p2], ret)
3243 bookmarks.update(self, [p1, p2], ret)
3250 cctx.markcommitted(ret)
3244 cctx.markcommitted(ret)
3251 ms.reset()
3245 ms.reset()
3252 except: # re-raises
3246 except: # re-raises
3253 if edited:
3247 if edited:
3254 self.ui.write(
3248 self.ui.write(
3255 _(b'note: commit message saved in %s\n') % msg_path
3249 _(b'note: commit message saved in %s\n') % msg_path
3256 )
3250 )
3257 self.ui.write(
3251 self.ui.write(
3258 _(
3252 _(
3259 b"note: use 'hg commit --logfile "
3253 b"note: use 'hg commit --logfile "
3260 b"%s --edit' to reuse it\n"
3254 b"%s --edit' to reuse it\n"
3261 )
3255 )
3262 % msg_path
3256 % msg_path
3263 )
3257 )
3264 raise
3258 raise
3265
3259
3266 def commithook(unused_success):
3260 def commithook(unused_success):
3267 # hack for commands that use a temporary commit (eg: histedit):
3261 # hack for commands that use a temporary commit (eg: histedit):
3268 # the temporary commit may have been stripped before the hook runs
3262 # the temporary commit may have been stripped before the hook runs
3269 if self.changelog.hasnode(ret):
3263 if self.changelog.hasnode(ret):
3270 self.hook(
3264 self.hook(
3271 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3265 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3272 )
3266 )
3273
3267
3274 self._afterlock(commithook)
3268 self._afterlock(commithook)
3275 return ret
3269 return ret
3276
3270
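# A hedged usage sketch for commit() above; the message and user values
# are placeholders:
#
#     node = repo.commit(
#         text=b'example: fix a typo',
#         user=b'Example Hacker <hacker@example.com>',
#     )
#     if node is None:
#         repo.ui.status(b'nothing was committed\n')   # empty commit case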
3277 @unfilteredmethod
3271 @unfilteredmethod
3278 def commitctx(self, ctx, error=False, origctx=None):
3272 def commitctx(self, ctx, error=False, origctx=None):
3279 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3273 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3280
3274
3281 @unfilteredmethod
3275 @unfilteredmethod
3282 def destroying(self):
3276 def destroying(self):
3283 """Inform the repository that nodes are about to be destroyed.
3277 """Inform the repository that nodes are about to be destroyed.
3284 Intended for use by strip and rollback, so there's a common
3278 Intended for use by strip and rollback, so there's a common
3285 place for anything that has to be done before destroying history.
3279 place for anything that has to be done before destroying history.
3286
3280
3287 This is mostly useful for saving state that is in memory and waiting
3281 This is mostly useful for saving state that is in memory and waiting
3288 to be flushed when the current lock is released. Because a call to
3282 to be flushed when the current lock is released. Because a call to
3289 destroyed is imminent, the repo will be invalidated causing those
3283 destroyed is imminent, the repo will be invalidated causing those
3290 changes to stay in memory (waiting for the next unlock), or vanish
3284 changes to stay in memory (waiting for the next unlock), or vanish
3291 completely.
3285 completely.
3292 """
3286 """
3293 # When using the same lock to commit and strip, the phasecache is left
3287 # When using the same lock to commit and strip, the phasecache is left
3294 # dirty after committing. Then when we strip, the repo is invalidated,
3288 # dirty after committing. Then when we strip, the repo is invalidated,
3295 # causing those changes to disappear.
3289 # causing those changes to disappear.
3296 if '_phasecache' in vars(self):
3290 if '_phasecache' in vars(self):
3297 self._phasecache.write()
3291 self._phasecache.write()
3298
3292
3299 @unfilteredmethod
3293 @unfilteredmethod
3300 def destroyed(self):
3294 def destroyed(self):
3301 """Inform the repository that nodes have been destroyed.
3295 """Inform the repository that nodes have been destroyed.
3302 Intended for use by strip and rollback, so there's a common
3296 Intended for use by strip and rollback, so there's a common
3303 place for anything that has to be done after destroying history.
3297 place for anything that has to be done after destroying history.
3304 """
3298 """
3305 # When one tries to:
3299 # When one tries to:
3306 # 1) destroy nodes thus calling this method (e.g. strip)
3300 # 1) destroy nodes thus calling this method (e.g. strip)
3307 # 2) use phasecache somewhere (e.g. commit)
3301 # 2) use phasecache somewhere (e.g. commit)
3308 #
3302 #
3309 # then 2) will fail because the phasecache contains nodes that were
3303 # then 2) will fail because the phasecache contains nodes that were
3310 # removed. We can either remove phasecache from the filecache,
3304 # removed. We can either remove phasecache from the filecache,
3311 # causing it to reload next time it is accessed, or simply filter
3305 # causing it to reload next time it is accessed, or simply filter
3312 # the removed nodes now and write the updated cache.
3306 # the removed nodes now and write the updated cache.
3313 self._phasecache.filterunknown(self)
3307 self._phasecache.filterunknown(self)
3314 self._phasecache.write()
3308 self._phasecache.write()
3315
3309
3316 # refresh all repository caches
3310 # refresh all repository caches
3317 self.updatecaches()
3311 self.updatecaches()
3318
3312
3319 # Ensure the persistent tag cache is updated. Doing it now
3313 # Ensure the persistent tag cache is updated. Doing it now
3320 # means that the tag cache only has to worry about destroyed
3314 # means that the tag cache only has to worry about destroyed
3321 # heads immediately after a strip/rollback. That in turn
3315 # heads immediately after a strip/rollback. That in turn
3322 # guarantees that "cachetip == currenttip" (comparing both rev
3316 # guarantees that "cachetip == currenttip" (comparing both rev
3323 # and node) always means no nodes have been added or destroyed.
3317 # and node) always means no nodes have been added or destroyed.
3324
3318
3325 # XXX this is suboptimal when qrefresh'ing: we strip the current
3319 # XXX this is suboptimal when qrefresh'ing: we strip the current
3326 # head, refresh the tag cache, then immediately add a new head.
3320 # head, refresh the tag cache, then immediately add a new head.
3327 # But I think doing it this way is necessary for the "instant
3321 # But I think doing it this way is necessary for the "instant
3328 # tag cache retrieval" case to work.
3322 # tag cache retrieval" case to work.
3329 self.invalidate()
3323 self.invalidate()
3330
3324
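# destroying()/destroyed() above form a bracketing protocol for
# history-destroying operations; a sketch of the expected call sequence
# in strip-like code:
#
#     repo.destroying()   # flush in-memory state (e.g. phasecache)
#     ...                 # actually remove the nodes
#     repo.destroyed()    # filter stale cache entries, rewarm caches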
3331 def status(
3325 def status(
3332 self,
3326 self,
3333 node1=b'.',
3327 node1=b'.',
3334 node2=None,
3328 node2=None,
3335 match=None,
3329 match=None,
3336 ignored=False,
3330 ignored=False,
3337 clean=False,
3331 clean=False,
3338 unknown=False,
3332 unknown=False,
3339 listsubrepos=False,
3333 listsubrepos=False,
3340 ):
3334 ):
3341 '''a convenience method that calls node1.status(node2)'''
3335 '''a convenience method that calls node1.status(node2)'''
3342 return self[node1].status(
3336 return self[node1].status(
3343 node2, match, ignored, clean, unknown, listsubrepos
3337 node2, match, ignored, clean, unknown, listsubrepos
3344 )
3338 )
3345
3339
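# A minimal sketch of the status() convenience wrapper above; the
# returned object carries the usual file lists:
#
#     st = repo.status()                    # working copy vs. '.'
#     for f in st.modified + st.added:
#         repo.ui.write(b'%s\n' % f)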
3346 def addpostdsstatus(self, ps):
3340 def addpostdsstatus(self, ps):
3347 """Add a callback to run within the wlock, at the point at which status
3341 """Add a callback to run within the wlock, at the point at which status
3348 fixups happen.
3342 fixups happen.
3349
3343
3350 On status completion, callback(wctx, status) will be called with the
3344 On status completion, callback(wctx, status) will be called with the
3351 wlock held, unless the dirstate has changed from underneath or the wlock
3345 wlock held, unless the dirstate has changed from underneath or the wlock
3352 couldn't be grabbed.
3346 couldn't be grabbed.
3353
3347
3354 Callbacks should not capture and use a cached copy of the dirstate --
3348 Callbacks should not capture and use a cached copy of the dirstate --
3355 it might change in the meantime. Instead, they should access the
3349 it might change in the meantime. Instead, they should access the
3356 dirstate via wctx.repo().dirstate.
3350 dirstate via wctx.repo().dirstate.
3357
3351
3358 This list is emptied out after each status run -- extensions should
3352 This list is emptied out after each status run -- extensions should
3359 make sure they add to this list each time dirstate.status is called.
3353 make sure they add to this list each time dirstate.status is called.
3360 Extensions should also make sure they don't call this for statuses
3354 Extensions should also make sure they don't call this for statuses
3361 that don't involve the dirstate.
3355 that don't involve the dirstate.
3362 """
3356 """
3363
3357
3364 # The list is located here for uniqueness reasons -- it is actually
3358 # The list is located here for uniqueness reasons -- it is actually
3365 # managed by the workingctx, but that isn't unique per-repo.
3359 # managed by the workingctx, but that isn't unique per-repo.
3366 self._postdsstatus.append(ps)
3360 self._postdsstatus.append(ps)
3367
3361
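# A sketch of the callback shape expected by addpostdsstatus() above;
# per the docstring it must reach the dirstate through wctx.repo()
# instead of capturing a copy:
#
#     def fixup(wctx, status):
#         ds = wctx.repo().dirstate   # never a cached reference
#         # inspect `status` and apply fixups here
#
#     repo.addpostdsstatus(fixup)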
3368 def postdsstatus(self):
3362 def postdsstatus(self):
3369 """Used by workingctx to get the list of post-dirstate-status hooks."""
3363 """Used by workingctx to get the list of post-dirstate-status hooks."""
3370 return self._postdsstatus
3364 return self._postdsstatus
3371
3365
3372 def clearpostdsstatus(self):
3366 def clearpostdsstatus(self):
3373 """Used by workingctx to clear post-dirstate-status hooks."""
3367 """Used by workingctx to clear post-dirstate-status hooks."""
3374 del self._postdsstatus[:]
3368 del self._postdsstatus[:]
3375
3369
3376 def heads(self, start=None):
3370 def heads(self, start=None):
3377 if start is None:
3371 if start is None:
3378 cl = self.changelog
3372 cl = self.changelog
3379 headrevs = reversed(cl.headrevs())
3373 headrevs = reversed(cl.headrevs())
3380 return [cl.node(rev) for rev in headrevs]
3374 return [cl.node(rev) for rev in headrevs]
3381
3375
3382 heads = self.changelog.heads(start)
3376 heads = self.changelog.heads(start)
3383 # sort the output in rev descending order
3377 # sort the output in rev descending order
3384 return sorted(heads, key=self.changelog.rev, reverse=True)
3378 return sorted(heads, key=self.changelog.rev, reverse=True)
3385
3379
3386 def branchheads(self, branch=None, start=None, closed=False):
3380 def branchheads(self, branch=None, start=None, closed=False):
3387 """return a (possibly filtered) list of heads for the given branch
3381 """return a (possibly filtered) list of heads for the given branch
3388
3382
3389 Heads are returned in topological order, from newest to oldest.
3383 Heads are returned in topological order, from newest to oldest.
3390 If branch is None, use the dirstate branch.
3384 If branch is None, use the dirstate branch.
3391 If start is not None, return only heads reachable from start.
3385 If start is not None, return only heads reachable from start.
3392 If closed is True, return heads that are marked as closed as well.
3386 If closed is True, return heads that are marked as closed as well.
3393 """
3387 """
3394 if branch is None:
3388 if branch is None:
3395 branch = self[None].branch()
3389 branch = self[None].branch()
3396 branches = self.branchmap()
3390 branches = self.branchmap()
3397 if not branches.hasbranch(branch):
3391 if not branches.hasbranch(branch):
3398 return []
3392 return []
3399 # the cache returns heads ordered lowest to highest
3393 # the cache returns heads ordered lowest to highest
3400 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3394 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3401 if start is not None:
3395 if start is not None:
3402 # filter out the heads that cannot be reached from startrev
3396 # filter out the heads that cannot be reached from startrev
3403 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3397 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3404 bheads = [h for h in bheads if h in fbheads]
3398 bheads = [h for h in bheads if h in fbheads]
3405 return bheads
3399 return bheads
3406
3400
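# A hedged usage sketch for branchheads() above; b'default' stands in
# for any branch name:
#
#     for h in repo.branchheads(b'default', closed=True):
#         repo.ui.write(b'%s\n' % hex(h))   # newest to oldest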
3407 def branches(self, nodes):
3401 def branches(self, nodes):
3408 if not nodes:
3402 if not nodes:
3409 nodes = [self.changelog.tip()]
3403 nodes = [self.changelog.tip()]
3410 b = []
3404 b = []
3411 for n in nodes:
3405 for n in nodes:
3412 t = n
3406 t = n
3413 while True:
3407 while True:
3414 p = self.changelog.parents(n)
3408 p = self.changelog.parents(n)
3415 if p[1] != self.nullid or p[0] == self.nullid:
3409 if p[1] != self.nullid or p[0] == self.nullid:
3416 b.append((t, n, p[0], p[1]))
3410 b.append((t, n, p[0], p[1]))
3417 break
3411 break
3418 n = p[0]
3412 n = p[0]
3419 return b
3413 return b
3420
3414
3421 def between(self, pairs):
3415 def between(self, pairs):
3422 r = []
3416 r = []
3423
3417
3424 for top, bottom in pairs:
3418 for top, bottom in pairs:
3425 n, l, i = top, [], 0
3419 n, l, i = top, [], 0
3426 f = 1
3420 f = 1
3427
3421
3428 while n != bottom and n != self.nullid:
3422 while n != bottom and n != self.nullid:
3429 p = self.changelog.parents(n)[0]
3423 p = self.changelog.parents(n)[0]
3430 if i == f:
3424 if i == f:
3431 l.append(n)
3425 l.append(n)
3432 f = f * 2
3426 f = f * 2
3433 n = p
3427 n = p
3434 i += 1
3428 i += 1
3435
3429
3436 r.append(l)
3430 r.append(l)
3437
3431
3438 return r
3432 return r
3439
3433
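# between() above walks first parents from each `top` towards `bottom`,
# recording the nodes that are 1, 2, 4, 8, ... steps away (`f` doubles
# on every hit). A sketch for a single pair on a linear history:
#
#     [sample] = repo.between([(tip, root)])
#     # `sample` holds ancestors of `tip` at exponentially growing
#     # distances, stopping at `root` or the null node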
3440 def checkpush(self, pushop):
3434 def checkpush(self, pushop):
3441 """Extensions can override this function if additional checks have
3435 """Extensions can override this function if additional checks have
3442 to be performed before pushing, or call it if they override push
3436 to be performed before pushing, or call it if they override push
3443 command.
3437 command.
3444 """
3438 """
3445
3439
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

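A hedged usage sketch: the pushkey machinery is namespace-based; for the stock b'bookmarks' namespace, values are hex node strings and an empty old value means "create". ``repo`` and ``node`` are assumed to exist.

from mercurial.node import hex

# move (or create) a bookmark through the generic pushkey interface;
# returns True on success, False if a prepushkey hook aborted
ok = repo.pushkey(b'bookmarks', b'my-bookmark', b'', hex(node))
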
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

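The read side of the same mechanism, as a hedged sketch; the b'namespaces' namespace enumerates the registered pushkey namespaces themselves.

print(sorted(repo.listkeys(b'namespaces')))
for name, hexnode in sorted(repo.listkeys(b'bookmarks').items()):
    print(name, hexnode)
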
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

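A hedged sketch: the text is stashed in .hg/last-message.txt and a path pointing at it is returned, which callers typically embed in abort messages so an interrupted commit's message can be recovered.

backup = repo.savecommitmessage(b'WIP: draft commit message\n')
repo.ui.status(b'commit message saved in %s\n' % backup)
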
    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


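A hedged registration sketch with hypothetical names (the b'exp-example' category and compute_nothing helper are illustrative, not part of Mercurial). Computers appear to be called with (repo, store, rev, previous sidedata) and to return (new entries, (flags to add, flags to remove)); treat the exact contract as an assumption here.

from mercurial.revlogutils import constants as revlogconst

def compute_nothing(repo, store, rev, prev_sidedata):
    # no sidedata to contribute, no revlog flags to add or remove
    return {}, (0, 0)

repo.register_sidedata_computer(
    revlogconst.KIND_CHANGELOG,  # one of revlogconst.ALL_KINDS
    b'exp-example',              # hypothetical category
    (b'exp-example',),           # sidedata keys this computer may write
    compute_nothing,
    0,                           # revlog flags the computer can set
)
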
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a


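A hedged sketch of how this closure is used: the repository passes (vfs, src, dest) triples so that, once a transaction closes, its journal files are renamed into the corresponding undo files.

# names follow the journal/undo convention used elsewhere in this module
after = aftertrans([(repo.svfs, b'journal', b'undo')])
after()  # renames journal -> undo; silently skips a missing journal
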
def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


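For illustration, the resulting journal-to-undo mapping on typical inputs (POSIX path separators assumed):

assert undoname(b'store/journal') == b'store/undo'
assert undoname(b'journal.dirstate') == b'undo.dirstate'
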
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


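A hedged sketch of calling this with stock defaults; the exact result depends on the installed version and configuration.

from mercurial import ui as uimod

myui = uimod.ui.load()
reqs = newreporequirements(myui, defaultcreateopts(myui))
# typically includes b'revlogv1', b'store', b'fncache', b'dotencode',
# b'generaldelta' and b'sparserevlog', plus whatever the config enables
print(sorted(reqs))
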
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because the
    requirements they depend on are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


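For illustration, a hedged sketch of the no-store case (reusing ``myui`` from the sketch above): without b'store', the bookmarks-in-store requirement is warned about and reported for dropping.

reqs = {
    requirementsmod.REVLOGV1_REQUIREMENT,
    requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
}
reqs -= checkrequirementscompat(myui, reqs)  # warns on stderr
assert requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT not in reqs
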
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


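A hedged sketch; b'frobnicate' is a made-up option name used only for illustration.

leftover = filterknowncreateopts(
    myui, {b'backend': b'revlogv1', b'frobnicate': True}
)
assert leftover == {b'frobnicate': True}  # would make createrepository() abort
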
def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
                     (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


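An end-to-end hedged sketch (the path is a placeholder): create a repository on disk, then open it through the normal entry point; hg.repository() ultimately routes through instance() above.

from mercurial import hg, ui as uimod

myui = uimod.ui.load()
createrepository(myui, b'/tmp/scratch-repo')
repo = hg.repository(myui, b'/tmp/scratch-repo')
print(sorted(repo.requirements))
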
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
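
After poisoning, only close() survives, as this hedged sketch shows:

poisonrepository(repo)
repo.close()  # still allowed (and now a no-op)
repo.root     # raises error.ProgrammingError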