manifestlog: also monitor `00manifest.n` when applicable...
marmoute
r48854:7970895a stable
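
Context for the change: when the `persistent-nodemap` storage option is enabled, the manifest revlog keeps a nodemap docket in `00manifest.n` next to the `00manifest.i` index. The new `manifestlogcache` class in the hunk below mirrors the existing `changelogcache`: it always tracks `00manifest.i`, and additionally tracks `00manifest.n` when the persistent nodemap is in use, so a cached manifestlog is also refreshed when only the docket file changes. The following is a minimal, self-contained sketch of that gating logic; it is not part of the commit, and `FakeRepo` and `tracked_manifest_paths` are hypothetical stand-ins for the real repository objects.

import types


def tracked_manifest_paths(repo):
    # mirrors manifestlogcache.tracked_paths from the hunk below
    paths = [repo.sjoin(b'00manifest.i')]
    if repo.store.opener.options.get(b'persistent-nodemap', False):
        # also watch the nodemap docket; otherwise a cached manifestlog
        # could go stale when only 00manifest.n is rewritten on disk
        paths.append(repo.sjoin(b'00manifest.n'))
    return paths


class FakeRepo(object):
    # stand-in exposing only the attributes the helper consults
    def __init__(self, persistent_nodemap):
        opener = types.SimpleNamespace(
            options={b'persistent-nodemap': persistent_nodemap}
        )
        self.store = types.SimpleNamespace(opener=opener)

    def sjoin(self, fname):
        return b'.hg/store/' + fname


print(tracked_manifest_paths(FakeRepo(False)))  # [b'.hg/store/00manifest.i']
print(tracked_manifest_paths(FakeRepo(True)))   # adds b'.hg/store/00manifest.n'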
@@ -1,3866 +1,3881 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import functools
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     sha1nodeconstants,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revlog,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
     wireprototypes,
 )
 
 from .interfaces import (
     repository,
     util as interfaceutil,
 )
 
 from .utils import (
     hashutil,
     procutil,
     stringutil,
     urlutil,
 )
 
 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
     sidedata as sidedatamod,
 )
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered"""
 
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)
 
     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)
 
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
 
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
 
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 
 class changelogcache(storecache):
     """filecache for the changelog"""
 
     def __init__(self):
         super(changelogcache, self).__init__()
         _cachedfiles.add((b'00changelog.i', b''))
         _cachedfiles.add((b'00changelog.n', b''))
 
     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00changelog.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00changelog.n'))
         return paths
 
 
+class manifestlogcache(storecache):
+    """filecache for the manifestlog"""
+
+    def __init__(self):
+        super(manifestlogcache, self).__init__()
+        _cachedfiles.add((b'00manifest.i', b''))
+        _cachedfiles.add((b'00manifest.n', b''))
+
+    def tracked_paths(self, obj):
+        paths = [self.join(obj, b'00manifest.i')]
+        if obj.store.opener.options.get(b'persistent-nodemap', False):
+            paths.append(self.join(obj, b'00manifest.n'))
+        return paths
+
+
162 class mixedrepostorecache(_basefilecache):
177 class mixedrepostorecache(_basefilecache):
163 """filecache for a mix files in .hg/store and outside"""
178 """filecache for a mix files in .hg/store and outside"""
164
179
165 def __init__(self, *pathsandlocations):
180 def __init__(self, *pathsandlocations):
166 # scmutil.filecache only uses the path for passing back into our
181 # scmutil.filecache only uses the path for passing back into our
167 # join(), so we can safely pass a list of paths and locations
182 # join(), so we can safely pass a list of paths and locations
168 super(mixedrepostorecache, self).__init__(*pathsandlocations)
183 super(mixedrepostorecache, self).__init__(*pathsandlocations)
169 _cachedfiles.update(pathsandlocations)
184 _cachedfiles.update(pathsandlocations)
170
185
171 def join(self, obj, fnameandlocation):
186 def join(self, obj, fnameandlocation):
172 fname, location = fnameandlocation
187 fname, location = fnameandlocation
173 if location == b'plain':
188 if location == b'plain':
174 return obj.vfs.join(fname)
189 return obj.vfs.join(fname)
175 else:
190 else:
176 if location != b'':
191 if location != b'':
177 raise error.ProgrammingError(
192 raise error.ProgrammingError(
178 b'unexpected location: %s' % location
193 b'unexpected location: %s' % location
179 )
194 )
180 return obj.sjoin(fname)
195 return obj.sjoin(fname)
181
196
182
197
183 def isfilecached(repo, name):
198 def isfilecached(repo, name):
184 """check if a repo has already cached "name" filecache-ed property
199 """check if a repo has already cached "name" filecache-ed property
185
200
186 This returns (cachedobj-or-None, iscached) tuple.
201 This returns (cachedobj-or-None, iscached) tuple.
187 """
202 """
188 cacheentry = repo.unfiltered()._filecache.get(name, None)
203 cacheentry = repo.unfiltered()._filecache.get(name, None)
189 if not cacheentry:
204 if not cacheentry:
190 return None, False
205 return None, False
191 return cacheentry.obj, True
206 return cacheentry.obj, True
192
207
193
208
194 class unfilteredpropertycache(util.propertycache):
209 class unfilteredpropertycache(util.propertycache):
195 """propertycache that apply to unfiltered repo only"""
210 """propertycache that apply to unfiltered repo only"""
196
211
197 def __get__(self, repo, type=None):
212 def __get__(self, repo, type=None):
198 unfi = repo.unfiltered()
213 unfi = repo.unfiltered()
199 if unfi is repo:
214 if unfi is repo:
200 return super(unfilteredpropertycache, self).__get__(unfi)
215 return super(unfilteredpropertycache, self).__get__(unfi)
201 return getattr(unfi, self.name)
216 return getattr(unfi, self.name)
202
217
203
218
204 class filteredpropertycache(util.propertycache):
219 class filteredpropertycache(util.propertycache):
205 """propertycache that must take filtering in account"""
220 """propertycache that must take filtering in account"""
206
221
207 def cachevalue(self, obj, value):
222 def cachevalue(self, obj, value):
208 object.__setattr__(obj, self.name, value)
223 object.__setattr__(obj, self.name, value)
209
224
210
225
211 def hasunfilteredcache(repo, name):
226 def hasunfilteredcache(repo, name):
212 """check if a repo has an unfilteredpropertycache value for <name>"""
227 """check if a repo has an unfilteredpropertycache value for <name>"""
213 return name in vars(repo.unfiltered())
228 return name in vars(repo.unfiltered())
214
229
215
230
216 def unfilteredmethod(orig):
231 def unfilteredmethod(orig):
217 """decorate method that always need to be run on unfiltered version"""
232 """decorate method that always need to be run on unfiltered version"""
218
233
219 @functools.wraps(orig)
234 @functools.wraps(orig)
220 def wrapper(repo, *args, **kwargs):
235 def wrapper(repo, *args, **kwargs):
221 return orig(repo.unfiltered(), *args, **kwargs)
236 return orig(repo.unfiltered(), *args, **kwargs)
222
237
223 return wrapper
238 return wrapper
224
239
225
240
226 moderncaps = {
241 moderncaps = {
227 b'lookup',
242 b'lookup',
228 b'branchmap',
243 b'branchmap',
229 b'pushkey',
244 b'pushkey',
230 b'known',
245 b'known',
231 b'getbundle',
246 b'getbundle',
232 b'unbundle',
247 b'unbundle',
233 }
248 }
234 legacycaps = moderncaps.union({b'changegroupsubset'})
249 legacycaps = moderncaps.union({b'changegroupsubset'})
235
250
236
251
237 @interfaceutil.implementer(repository.ipeercommandexecutor)
252 @interfaceutil.implementer(repository.ipeercommandexecutor)
238 class localcommandexecutor(object):
253 class localcommandexecutor(object):
239 def __init__(self, peer):
254 def __init__(self, peer):
240 self._peer = peer
255 self._peer = peer
241 self._sent = False
256 self._sent = False
242 self._closed = False
257 self._closed = False
243
258
244 def __enter__(self):
259 def __enter__(self):
245 return self
260 return self
246
261
247 def __exit__(self, exctype, excvalue, exctb):
262 def __exit__(self, exctype, excvalue, exctb):
248 self.close()
263 self.close()
249
264
250 def callcommand(self, command, args):
265 def callcommand(self, command, args):
251 if self._sent:
266 if self._sent:
252 raise error.ProgrammingError(
267 raise error.ProgrammingError(
253 b'callcommand() cannot be used after sendcommands()'
268 b'callcommand() cannot be used after sendcommands()'
254 )
269 )
255
270
256 if self._closed:
271 if self._closed:
257 raise error.ProgrammingError(
272 raise error.ProgrammingError(
258 b'callcommand() cannot be used after close()'
273 b'callcommand() cannot be used after close()'
259 )
274 )
260
275
261 # We don't need to support anything fancy. Just call the named
276 # We don't need to support anything fancy. Just call the named
262 # method on the peer and return a resolved future.
277 # method on the peer and return a resolved future.
263 fn = getattr(self._peer, pycompat.sysstr(command))
278 fn = getattr(self._peer, pycompat.sysstr(command))
264
279
265 f = pycompat.futures.Future()
280 f = pycompat.futures.Future()
266
281
267 try:
282 try:
268 result = fn(**pycompat.strkwargs(args))
283 result = fn(**pycompat.strkwargs(args))
269 except Exception:
284 except Exception:
270 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
285 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
271 else:
286 else:
272 f.set_result(result)
287 f.set_result(result)
273
288
274 return f
289 return f
275
290
276 def sendcommands(self):
291 def sendcommands(self):
277 self._sent = True
292 self._sent = True
278
293
279 def close(self):
294 def close(self):
280 self._closed = True
295 self._closed = True
281
296
282
297
283 @interfaceutil.implementer(repository.ipeercommands)
298 @interfaceutil.implementer(repository.ipeercommands)
284 class localpeer(repository.peer):
299 class localpeer(repository.peer):
285 '''peer for a local repo; reflects only the most recent API'''
300 '''peer for a local repo; reflects only the most recent API'''
286
301
287 def __init__(self, repo, caps=None):
302 def __init__(self, repo, caps=None):
288 super(localpeer, self).__init__()
303 super(localpeer, self).__init__()
289
304
290 if caps is None:
305 if caps is None:
291 caps = moderncaps.copy()
306 caps = moderncaps.copy()
292 self._repo = repo.filtered(b'served')
307 self._repo = repo.filtered(b'served')
293 self.ui = repo.ui
308 self.ui = repo.ui
294
309
295 if repo._wanted_sidedata:
310 if repo._wanted_sidedata:
296 formatted = bundle2.format_remote_wanted_sidedata(repo)
311 formatted = bundle2.format_remote_wanted_sidedata(repo)
297 caps.add(b'exp-wanted-sidedata=' + formatted)
312 caps.add(b'exp-wanted-sidedata=' + formatted)
298
313
299 self._caps = repo._restrictcapabilities(caps)
314 self._caps = repo._restrictcapabilities(caps)
300
315
301 # Begin of _basepeer interface.
316 # Begin of _basepeer interface.
302
317
303 def url(self):
318 def url(self):
304 return self._repo.url()
319 return self._repo.url()
305
320
306 def local(self):
321 def local(self):
307 return self._repo
322 return self._repo
308
323
309 def peer(self):
324 def peer(self):
310 return self
325 return self
311
326
312 def canpush(self):
327 def canpush(self):
313 return True
328 return True
314
329
315 def close(self):
330 def close(self):
316 self._repo.close()
331 self._repo.close()
317
332
318 # End of _basepeer interface.
333 # End of _basepeer interface.
319
334
320 # Begin of _basewirecommands interface.
335 # Begin of _basewirecommands interface.
321
336
322 def branchmap(self):
337 def branchmap(self):
323 return self._repo.branchmap()
338 return self._repo.branchmap()
324
339
325 def capabilities(self):
340 def capabilities(self):
326 return self._caps
341 return self._caps
327
342
328 def clonebundles(self):
343 def clonebundles(self):
329 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
344 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
330
345
331 def debugwireargs(self, one, two, three=None, four=None, five=None):
346 def debugwireargs(self, one, two, three=None, four=None, five=None):
332 """Used to test argument passing over the wire"""
347 """Used to test argument passing over the wire"""
333 return b"%s %s %s %s %s" % (
348 return b"%s %s %s %s %s" % (
334 one,
349 one,
335 two,
350 two,
336 pycompat.bytestr(three),
351 pycompat.bytestr(three),
337 pycompat.bytestr(four),
352 pycompat.bytestr(four),
338 pycompat.bytestr(five),
353 pycompat.bytestr(five),
339 )
354 )
340
355
341 def getbundle(
356 def getbundle(
342 self,
357 self,
343 source,
358 source,
344 heads=None,
359 heads=None,
345 common=None,
360 common=None,
346 bundlecaps=None,
361 bundlecaps=None,
347 remote_sidedata=None,
362 remote_sidedata=None,
348 **kwargs
363 **kwargs
349 ):
364 ):
350 chunks = exchange.getbundlechunks(
365 chunks = exchange.getbundlechunks(
351 self._repo,
366 self._repo,
352 source,
367 source,
353 heads=heads,
368 heads=heads,
354 common=common,
369 common=common,
355 bundlecaps=bundlecaps,
370 bundlecaps=bundlecaps,
356 remote_sidedata=remote_sidedata,
371 remote_sidedata=remote_sidedata,
357 **kwargs
372 **kwargs
358 )[1]
373 )[1]
359 cb = util.chunkbuffer(chunks)
374 cb = util.chunkbuffer(chunks)
360
375
361 if exchange.bundle2requested(bundlecaps):
376 if exchange.bundle2requested(bundlecaps):
362 # When requesting a bundle2, getbundle returns a stream to make the
377 # When requesting a bundle2, getbundle returns a stream to make the
363 # wire level function happier. We need to build a proper object
378 # wire level function happier. We need to build a proper object
364 # from it in local peer.
379 # from it in local peer.
365 return bundle2.getunbundler(self.ui, cb)
380 return bundle2.getunbundler(self.ui, cb)
366 else:
381 else:
367 return changegroup.getunbundler(b'01', cb, None)
382 return changegroup.getunbundler(b'01', cb, None)
368
383
369 def heads(self):
384 def heads(self):
370 return self._repo.heads()
385 return self._repo.heads()
371
386
372 def known(self, nodes):
387 def known(self, nodes):
373 return self._repo.known(nodes)
388 return self._repo.known(nodes)
374
389
375 def listkeys(self, namespace):
390 def listkeys(self, namespace):
376 return self._repo.listkeys(namespace)
391 return self._repo.listkeys(namespace)
377
392
378 def lookup(self, key):
393 def lookup(self, key):
379 return self._repo.lookup(key)
394 return self._repo.lookup(key)
380
395
381 def pushkey(self, namespace, key, old, new):
396 def pushkey(self, namespace, key, old, new):
382 return self._repo.pushkey(namespace, key, old, new)
397 return self._repo.pushkey(namespace, key, old, new)
383
398
384 def stream_out(self):
399 def stream_out(self):
385 raise error.Abort(_(b'cannot perform stream clone against local peer'))
400 raise error.Abort(_(b'cannot perform stream clone against local peer'))
386
401
387 def unbundle(self, bundle, heads, url):
402 def unbundle(self, bundle, heads, url):
388 """apply a bundle on a repo
403 """apply a bundle on a repo
389
404
390 This function handles the repo locking itself."""
405 This function handles the repo locking itself."""
391 try:
406 try:
392 try:
407 try:
393 bundle = exchange.readbundle(self.ui, bundle, None)
408 bundle = exchange.readbundle(self.ui, bundle, None)
394 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
409 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
395 if util.safehasattr(ret, b'getchunks'):
410 if util.safehasattr(ret, b'getchunks'):
396 # This is a bundle20 object, turn it into an unbundler.
411 # This is a bundle20 object, turn it into an unbundler.
397 # This little dance should be dropped eventually when the
412 # This little dance should be dropped eventually when the
398 # API is finally improved.
413 # API is finally improved.
399 stream = util.chunkbuffer(ret.getchunks())
414 stream = util.chunkbuffer(ret.getchunks())
400 ret = bundle2.getunbundler(self.ui, stream)
415 ret = bundle2.getunbundler(self.ui, stream)
401 return ret
416 return ret
402 except Exception as exc:
417 except Exception as exc:
403 # If the exception contains output salvaged from a bundle2
418 # If the exception contains output salvaged from a bundle2
404 # reply, we need to make sure it is printed before continuing
419 # reply, we need to make sure it is printed before continuing
405 # to fail. So we build a bundle2 with such output and consume
420 # to fail. So we build a bundle2 with such output and consume
406 # it directly.
421 # it directly.
407 #
422 #
408 # This is not very elegant but allows a "simple" solution for
423 # This is not very elegant but allows a "simple" solution for
409 # issue4594
424 # issue4594
410 output = getattr(exc, '_bundle2salvagedoutput', ())
425 output = getattr(exc, '_bundle2salvagedoutput', ())
411 if output:
426 if output:
412 bundler = bundle2.bundle20(self._repo.ui)
427 bundler = bundle2.bundle20(self._repo.ui)
413 for out in output:
428 for out in output:
414 bundler.addpart(out)
429 bundler.addpart(out)
415 stream = util.chunkbuffer(bundler.getchunks())
430 stream = util.chunkbuffer(bundler.getchunks())
416 b = bundle2.getunbundler(self.ui, stream)
431 b = bundle2.getunbundler(self.ui, stream)
417 bundle2.processbundle(self._repo, b)
432 bundle2.processbundle(self._repo, b)
418 raise
433 raise
419 except error.PushRaced as exc:
434 except error.PushRaced as exc:
420 raise error.ResponseError(
435 raise error.ResponseError(
421 _(b'push failed:'), stringutil.forcebytestr(exc)
436 _(b'push failed:'), stringutil.forcebytestr(exc)
422 )
437 )
423
438
424 # End of _basewirecommands interface.
439 # End of _basewirecommands interface.
425
440
426 # Begin of peer interface.
441 # Begin of peer interface.
427
442
428 def commandexecutor(self):
443 def commandexecutor(self):
429 return localcommandexecutor(self)
444 return localcommandexecutor(self)
430
445
431 # End of peer interface.
446 # End of peer interface.
432
447
433
448
434 @interfaceutil.implementer(repository.ipeerlegacycommands)
449 @interfaceutil.implementer(repository.ipeerlegacycommands)
435 class locallegacypeer(localpeer):
450 class locallegacypeer(localpeer):
436 """peer extension which implements legacy methods too; used for tests with
451 """peer extension which implements legacy methods too; used for tests with
437 restricted capabilities"""
452 restricted capabilities"""
438
453
439 def __init__(self, repo):
454 def __init__(self, repo):
440 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
441
456
442 # Begin of baselegacywirecommands interface.
457 # Begin of baselegacywirecommands interface.
443
458
444 def between(self, pairs):
459 def between(self, pairs):
445 return self._repo.between(pairs)
460 return self._repo.between(pairs)
446
461
447 def branches(self, nodes):
462 def branches(self, nodes):
448 return self._repo.branches(nodes)
463 return self._repo.branches(nodes)
449
464
450 def changegroup(self, nodes, source):
465 def changegroup(self, nodes, source):
451 outgoing = discovery.outgoing(
466 outgoing = discovery.outgoing(
452 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
467 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
453 )
468 )
454 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
469 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
455
470
456 def changegroupsubset(self, bases, heads, source):
471 def changegroupsubset(self, bases, heads, source):
457 outgoing = discovery.outgoing(
472 outgoing = discovery.outgoing(
458 self._repo, missingroots=bases, ancestorsof=heads
473 self._repo, missingroots=bases, ancestorsof=heads
459 )
474 )
460 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
475 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
461
476
462 # End of baselegacywirecommands interface.
477 # End of baselegacywirecommands interface.
463
478
464
479
465 # Functions receiving (ui, features) that extensions can register to impact
480 # Functions receiving (ui, features) that extensions can register to impact
466 # the ability to load repositories with custom requirements. Only
481 # the ability to load repositories with custom requirements. Only
467 # functions defined in loaded extensions are called.
482 # functions defined in loaded extensions are called.
468 #
483 #
469 # The function receives a set of requirement strings that the repository
484 # The function receives a set of requirement strings that the repository
470 # is capable of opening. Functions will typically add elements to the
485 # is capable of opening. Functions will typically add elements to the
471 # set to reflect that the extension knows how to handle that requirements.
486 # set to reflect that the extension knows how to handle that requirements.
472 featuresetupfuncs = set()
487 featuresetupfuncs = set()
473
488
474
489
475 def _getsharedvfs(hgvfs, requirements):
490 def _getsharedvfs(hgvfs, requirements):
476 """returns the vfs object pointing to root of shared source
491 """returns the vfs object pointing to root of shared source
477 repo for a shared repository
492 repo for a shared repository
478
493
479 hgvfs is vfs pointing at .hg/ of current repo (shared one)
494 hgvfs is vfs pointing at .hg/ of current repo (shared one)
480 requirements is a set of requirements of current repo (shared one)
495 requirements is a set of requirements of current repo (shared one)
481 """
496 """
482 # The ``shared`` or ``relshared`` requirements indicate the
497 # The ``shared`` or ``relshared`` requirements indicate the
483 # store lives in the path contained in the ``.hg/sharedpath`` file.
498 # store lives in the path contained in the ``.hg/sharedpath`` file.
484 # This is an absolute path for ``shared`` and relative to
499 # This is an absolute path for ``shared`` and relative to
485 # ``.hg/`` for ``relshared``.
500 # ``.hg/`` for ``relshared``.
486 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
501 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
487 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
502 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
488 sharedpath = util.normpath(hgvfs.join(sharedpath))
503 sharedpath = util.normpath(hgvfs.join(sharedpath))
489
504
490 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
505 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
491
506
492 if not sharedvfs.exists():
507 if not sharedvfs.exists():
493 raise error.RepoError(
508 raise error.RepoError(
494 _(b'.hg/sharedpath points to nonexistent directory %s')
509 _(b'.hg/sharedpath points to nonexistent directory %s')
495 % sharedvfs.base
510 % sharedvfs.base
496 )
511 )
497 return sharedvfs
512 return sharedvfs
498
513
499
514
500 def _readrequires(vfs, allowmissing):
515 def _readrequires(vfs, allowmissing):
501 """reads the require file present at root of this vfs
516 """reads the require file present at root of this vfs
502 and return a set of requirements
517 and return a set of requirements
503
518
504 If allowmissing is True, we suppress ENOENT if raised"""
519 If allowmissing is True, we suppress ENOENT if raised"""
505 # requires file contains a newline-delimited list of
520 # requires file contains a newline-delimited list of
506 # features/capabilities the opener (us) must have in order to use
521 # features/capabilities the opener (us) must have in order to use
507 # the repository. This file was introduced in Mercurial 0.9.2,
522 # the repository. This file was introduced in Mercurial 0.9.2,
508 # which means very old repositories may not have one. We assume
523 # which means very old repositories may not have one. We assume
509 # a missing file translates to no requirements.
524 # a missing file translates to no requirements.
510 try:
525 try:
511 requirements = set(vfs.read(b'requires').splitlines())
526 requirements = set(vfs.read(b'requires').splitlines())
512 except IOError as e:
527 except IOError as e:
513 if not (allowmissing and e.errno == errno.ENOENT):
528 if not (allowmissing and e.errno == errno.ENOENT):
514 raise
529 raise
515 requirements = set()
530 requirements = set()
516 return requirements
531 return requirements
517
532
518
533
519 def makelocalrepository(baseui, path, intents=None):
534 def makelocalrepository(baseui, path, intents=None):
520 """Create a local repository object.
535 """Create a local repository object.
521
536
522 Given arguments needed to construct a local repository, this function
537 Given arguments needed to construct a local repository, this function
523 performs various early repository loading functionality (such as
538 performs various early repository loading functionality (such as
524 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
525 the repository can be opened, derives a type suitable for representing
540 the repository can be opened, derives a type suitable for representing
526 that repository, and returns an instance of it.
541 that repository, and returns an instance of it.
527
542
528 The returned object conforms to the ``repository.completelocalrepository``
543 The returned object conforms to the ``repository.completelocalrepository``
529 interface.
544 interface.
530
545
531 The repository type is derived by calling a series of factory functions
546 The repository type is derived by calling a series of factory functions
532 for each aspect/interface of the final repository. These are defined by
547 for each aspect/interface of the final repository. These are defined by
533 ``REPO_INTERFACES``.
548 ``REPO_INTERFACES``.
534
549
535 Each factory function is called to produce a type implementing a specific
550 Each factory function is called to produce a type implementing a specific
536 interface. The cumulative list of returned types will be combined into a
551 interface. The cumulative list of returned types will be combined into a
537 new type and that type will be instantiated to represent the local
552 new type and that type will be instantiated to represent the local
538 repository.
553 repository.
539
554
540 The factory functions each receive various state that may be consulted
555 The factory functions each receive various state that may be consulted
541 as part of deriving a type.
556 as part of deriving a type.
542
557
543 Extensions should wrap these factory functions to customize repository type
558 Extensions should wrap these factory functions to customize repository type
544 creation. Note that an extension's wrapped function may be called even if
559 creation. Note that an extension's wrapped function may be called even if
545 that extension is not loaded for the repo being constructed. Extensions
560 that extension is not loaded for the repo being constructed. Extensions
546 should check if their ``__name__`` appears in the
561 should check if their ``__name__`` appears in the
547 ``extensionmodulenames`` set passed to the factory function and no-op if
562 ``extensionmodulenames`` set passed to the factory function and no-op if
548 not.
563 not.
549 """
564 """
550 ui = baseui.copy()
565 ui = baseui.copy()
551 # Prevent copying repo configuration.
566 # Prevent copying repo configuration.
552 ui.copy = baseui.copy
567 ui.copy = baseui.copy
553
568
554 # Working directory VFS rooted at repository root.
569 # Working directory VFS rooted at repository root.
555 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
556
571
557 # Main VFS for .hg/ directory.
572 # Main VFS for .hg/ directory.
558 hgpath = wdirvfs.join(b'.hg')
573 hgpath = wdirvfs.join(b'.hg')
559 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
560 # Whether this repository is shared one or not
575 # Whether this repository is shared one or not
561 shared = False
576 shared = False
562 # If this repository is shared, vfs pointing to shared repo
577 # If this repository is shared, vfs pointing to shared repo
563 sharedvfs = None
578 sharedvfs = None
564
579
565 # The .hg/ path should exist and should be a directory. All other
580 # The .hg/ path should exist and should be a directory. All other
566 # cases are errors.
581 # cases are errors.
567 if not hgvfs.isdir():
582 if not hgvfs.isdir():
568 try:
583 try:
569 hgvfs.stat()
584 hgvfs.stat()
570 except OSError as e:
585 except OSError as e:
571 if e.errno != errno.ENOENT:
586 if e.errno != errno.ENOENT:
572 raise
587 raise
573 except ValueError as e:
588 except ValueError as e:
574 # Can be raised on Python 3.8 when path is invalid.
589 # Can be raised on Python 3.8 when path is invalid.
575 raise error.Abort(
590 raise error.Abort(
576 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
591 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
577 )
592 )
578
593
579 raise error.RepoError(_(b'repository %s not found') % path)
594 raise error.RepoError(_(b'repository %s not found') % path)
580
595
581 requirements = _readrequires(hgvfs, True)
596 requirements = _readrequires(hgvfs, True)
582 shared = (
597 shared = (
583 requirementsmod.SHARED_REQUIREMENT in requirements
598 requirementsmod.SHARED_REQUIREMENT in requirements
584 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
599 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
585 )
600 )
586 storevfs = None
601 storevfs = None
587 if shared:
602 if shared:
588 # This is a shared repo
603 # This is a shared repo
589 sharedvfs = _getsharedvfs(hgvfs, requirements)
604 sharedvfs = _getsharedvfs(hgvfs, requirements)
590 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
605 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
591 else:
606 else:
592 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
607 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
593
608
594 # if .hg/requires contains the sharesafe requirement, it means
609 # if .hg/requires contains the sharesafe requirement, it means
595 # there exists a `.hg/store/requires` too and we should read it
610 # there exists a `.hg/store/requires` too and we should read it
596 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
611 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
597 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
612 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
598 # is not present, refer checkrequirementscompat() for that
613 # is not present, refer checkrequirementscompat() for that
599 #
614 #
600 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
615 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
601 # repository was shared the old way. We check the share source .hg/requires
616 # repository was shared the old way. We check the share source .hg/requires
602 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
617 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
603 # to be reshared
618 # to be reshared
604 hint = _(b"see `hg help config.format.use-share-safe` for more information")
619 hint = _(b"see `hg help config.format.use-share-safe` for more information")
605 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
620 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
606
621
607 if (
622 if (
608 shared
623 shared
609 and requirementsmod.SHARESAFE_REQUIREMENT
624 and requirementsmod.SHARESAFE_REQUIREMENT
610 not in _readrequires(sharedvfs, True)
625 not in _readrequires(sharedvfs, True)
611 ):
626 ):
612 mismatch_warn = ui.configbool(
627 mismatch_warn = ui.configbool(
613 b'share', b'safe-mismatch.source-not-safe.warn'
628 b'share', b'safe-mismatch.source-not-safe.warn'
614 )
629 )
615 mismatch_config = ui.config(
630 mismatch_config = ui.config(
616 b'share', b'safe-mismatch.source-not-safe'
631 b'share', b'safe-mismatch.source-not-safe'
617 )
632 )
618 if mismatch_config in (
633 if mismatch_config in (
619 b'downgrade-allow',
634 b'downgrade-allow',
620 b'allow',
635 b'allow',
621 b'downgrade-abort',
636 b'downgrade-abort',
622 ):
637 ):
623 # prevent cyclic import localrepo -> upgrade -> localrepo
638 # prevent cyclic import localrepo -> upgrade -> localrepo
624 from . import upgrade
639 from . import upgrade
625
640
626 upgrade.downgrade_share_to_non_safe(
641 upgrade.downgrade_share_to_non_safe(
627 ui,
642 ui,
628 hgvfs,
643 hgvfs,
629 sharedvfs,
644 sharedvfs,
630 requirements,
645 requirements,
631 mismatch_config,
646 mismatch_config,
632 mismatch_warn,
647 mismatch_warn,
633 )
648 )
634 elif mismatch_config == b'abort':
649 elif mismatch_config == b'abort':
635 raise error.Abort(
650 raise error.Abort(
636 _(b"share source does not support share-safe requirement"),
651 _(b"share source does not support share-safe requirement"),
637 hint=hint,
652 hint=hint,
638 )
653 )
639 else:
654 else:
640 raise error.Abort(
655 raise error.Abort(
641 _(
656 _(
642 b"share-safe mismatch with source.\nUnrecognized"
657 b"share-safe mismatch with source.\nUnrecognized"
643 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 b" value '%s' of `share.safe-mismatch.source-not-safe`"
644 b" set."
659 b" set."
645 )
660 )
646 % mismatch_config,
661 % mismatch_config,
647 hint=hint,
662 hint=hint,
648 )
663 )
649 else:
664 else:
650 requirements |= _readrequires(storevfs, False)
665 requirements |= _readrequires(storevfs, False)
651 elif shared:
666 elif shared:
652 sourcerequires = _readrequires(sharedvfs, False)
667 sourcerequires = _readrequires(sharedvfs, False)
653 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
654 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
655 mismatch_warn = ui.configbool(
670 mismatch_warn = ui.configbool(
656 b'share', b'safe-mismatch.source-safe.warn'
671 b'share', b'safe-mismatch.source-safe.warn'
657 )
672 )
658 if mismatch_config in (
673 if mismatch_config in (
659 b'upgrade-allow',
674 b'upgrade-allow',
660 b'allow',
675 b'allow',
661 b'upgrade-abort',
676 b'upgrade-abort',
662 ):
677 ):
663 # prevent cyclic import localrepo -> upgrade -> localrepo
678 # prevent cyclic import localrepo -> upgrade -> localrepo
664 from . import upgrade
679 from . import upgrade
665
680
666 upgrade.upgrade_share_to_safe(
681 upgrade.upgrade_share_to_safe(
667 ui,
682 ui,
668 hgvfs,
683 hgvfs,
669 storevfs,
684 storevfs,
670 requirements,
685 requirements,
671 mismatch_config,
686 mismatch_config,
672 mismatch_warn,
687 mismatch_warn,
673 )
688 )
674 elif mismatch_config == b'abort':
689 elif mismatch_config == b'abort':
675 raise error.Abort(
690 raise error.Abort(
676 _(
691 _(
677 b'version mismatch: source uses share-safe'
692 b'version mismatch: source uses share-safe'
678 b' functionality while the current share does not'
693 b' functionality while the current share does not'
679 ),
694 ),
680 hint=hint,
695 hint=hint,
681 )
696 )
682 else:
697 else:
683 raise error.Abort(
698 raise error.Abort(
684 _(
699 _(
685 b"share-safe mismatch with source.\nUnrecognized"
700 b"share-safe mismatch with source.\nUnrecognized"
686 b" value '%s' of `share.safe-mismatch.source-safe` set."
701 b" value '%s' of `share.safe-mismatch.source-safe` set."
687 )
702 )
688 % mismatch_config,
703 % mismatch_config,
689 hint=hint,
704 hint=hint,
690 )
705 )
691
706
692 # The .hg/hgrc file may load extensions or contain config options
707 # The .hg/hgrc file may load extensions or contain config options
693 # that influence repository construction. Attempt to load it and
708 # that influence repository construction. Attempt to load it and
694 # process any new extensions that it may have pulled in.
709 # process any new extensions that it may have pulled in.
695 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
710 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
696 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
711 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
697 extensions.loadall(ui)
712 extensions.loadall(ui)
698 extensions.populateui(ui)
713 extensions.populateui(ui)
699
714
700 # Set of module names of extensions loaded for this repository.
715 # Set of module names of extensions loaded for this repository.
701 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
716 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
702
717
703 supportedrequirements = gathersupportedrequirements(ui)
718 supportedrequirements = gathersupportedrequirements(ui)
704
719
705 # We first validate the requirements are known.
720 # We first validate the requirements are known.
706 ensurerequirementsrecognized(requirements, supportedrequirements)
721 ensurerequirementsrecognized(requirements, supportedrequirements)
707
722
708 # Then we validate that the known set is reasonable to use together.
723 # Then we validate that the known set is reasonable to use together.
709 ensurerequirementscompatible(ui, requirements)
724 ensurerequirementscompatible(ui, requirements)
710
725
711 # TODO there are unhandled edge cases related to opening repositories with
726 # TODO there are unhandled edge cases related to opening repositories with
712 # shared storage. If storage is shared, we should also test for requirements
727 # shared storage. If storage is shared, we should also test for requirements
713 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
728 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
714 # that repo, as that repo may load extensions needed to open it. This is a
729 # that repo, as that repo may load extensions needed to open it. This is a
715 # bit complicated because we don't want the other hgrc to overwrite settings
730 # bit complicated because we don't want the other hgrc to overwrite settings
716 # in this hgrc.
731 # in this hgrc.
717 #
732 #
718 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
733 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
719 # file when sharing repos. But if a requirement is added after the share is
734 # file when sharing repos. But if a requirement is added after the share is
720 # performed, thereby introducing a new requirement for the opener, we may
735 # performed, thereby introducing a new requirement for the opener, we may
721 # will not see that and could encounter a run-time error interacting with
736 # will not see that and could encounter a run-time error interacting with
722 # that shared store since it has an unknown-to-us requirement.
737 # that shared store since it has an unknown-to-us requirement.
723
738
724 # At this point, we know we should be capable of opening the repository.
739 # At this point, we know we should be capable of opening the repository.
725 # Now get on with doing that.
740 # Now get on with doing that.
726
741
727 features = set()
742 features = set()
728
743
729 # The "store" part of the repository holds versioned data. How it is
744 # The "store" part of the repository holds versioned data. How it is
730 # accessed is determined by various requirements. If `shared` or
745 # accessed is determined by various requirements. If `shared` or
731 # `relshared` requirements are present, this indicates current repository
746 # `relshared` requirements are present, this indicates current repository
732 # is a share and store exists in path mentioned in `.hg/sharedpath`
747 # is a share and store exists in path mentioned in `.hg/sharedpath`
733 if shared:
748 if shared:
734 storebasepath = sharedvfs.base
749 storebasepath = sharedvfs.base
735 cachepath = sharedvfs.join(b'cache')
750 cachepath = sharedvfs.join(b'cache')
736 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
751 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
737 else:
752 else:
738 storebasepath = hgvfs.base
753 storebasepath = hgvfs.base
739 cachepath = hgvfs.join(b'cache')
754 cachepath = hgvfs.join(b'cache')
740 wcachepath = hgvfs.join(b'wcache')
755 wcachepath = hgvfs.join(b'wcache')
741
756
742 # The store has changed over time and the exact layout is dictated by
757 # The store has changed over time and the exact layout is dictated by
743 # requirements. The store interface abstracts differences across all
758 # requirements. The store interface abstracts differences across all
744 # of them.
759 # of them.
745 store = makestore(
760 store = makestore(
746 requirements,
761 requirements,
747 storebasepath,
762 storebasepath,
748 lambda base: vfsmod.vfs(base, cacheaudited=True),
763 lambda base: vfsmod.vfs(base, cacheaudited=True),
749 )
764 )
750 hgvfs.createmode = store.createmode
765 hgvfs.createmode = store.createmode
751
766
752 storevfs = store.vfs
767 storevfs = store.vfs
753 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
768 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
754
769
755 if (
770 if (
756 requirementsmod.REVLOGV2_REQUIREMENT in requirements
771 requirementsmod.REVLOGV2_REQUIREMENT in requirements
757 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
772 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
758 ):
773 ):
759 features.add(repository.REPO_FEATURE_SIDE_DATA)
774 features.add(repository.REPO_FEATURE_SIDE_DATA)
760 # the revlogv2 docket introduced race condition that we need to fix
775 # the revlogv2 docket introduced race condition that we need to fix
761 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
776 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
762
777
763 # The cache vfs is used to manage cache files.
778 # The cache vfs is used to manage cache files.
764 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
779 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
765 cachevfs.createmode = store.createmode
780 cachevfs.createmode = store.createmode
766 # The cache vfs is used to manage cache files related to the working copy
781 # The cache vfs is used to manage cache files related to the working copy
767 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
782 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
768 wcachevfs.createmode = store.createmode
783 wcachevfs.createmode = store.createmode
769
784
770 # Now resolve the type for the repository object. We do this by repeatedly
785 # Now resolve the type for the repository object. We do this by repeatedly
771 # calling a factory function to produces types for specific aspects of the
786 # calling a factory function to produces types for specific aspects of the
772 # repo's operation. The aggregate returned types are used as base classes
787 # repo's operation. The aggregate returned types are used as base classes
773 # for a dynamically-derived type, which will represent our new repository.
788 # for a dynamically-derived type, which will represent our new repository.
774
789
775 bases = []
790 bases = []
776 extrastate = {}
791 extrastate = {}
777
792
778 for iface, fn in REPO_INTERFACES:
793 for iface, fn in REPO_INTERFACES:
779 # We pass all potentially useful state to give extensions tons of
794 # We pass all potentially useful state to give extensions tons of
780 # flexibility.
795 # flexibility.
781 typ = fn()(
796 typ = fn()(
782 ui=ui,
797 ui=ui,
783 intents=intents,
798 intents=intents,
784 requirements=requirements,
799 requirements=requirements,
785 features=features,
800 features=features,
786 wdirvfs=wdirvfs,
801 wdirvfs=wdirvfs,
787 hgvfs=hgvfs,
802 hgvfs=hgvfs,
788 store=store,
803 store=store,
789 storevfs=storevfs,
804 storevfs=storevfs,
790 storeoptions=storevfs.options,
805 storeoptions=storevfs.options,
791 cachevfs=cachevfs,
806 cachevfs=cachevfs,
792 wcachevfs=wcachevfs,
807 wcachevfs=wcachevfs,
793 extensionmodulenames=extensionmodulenames,
808 extensionmodulenames=extensionmodulenames,
794 extrastate=extrastate,
809 extrastate=extrastate,
795 baseclasses=bases,
810 baseclasses=bases,
796 )
811 )
797
812
798 if not isinstance(typ, type):
813 if not isinstance(typ, type):
799 raise error.ProgrammingError(
814 raise error.ProgrammingError(
800 b'unable to construct type for %s' % iface
815 b'unable to construct type for %s' % iface
801 )
816 )
802
817
803 bases.append(typ)
818 bases.append(typ)
804
819
805 # type() allows you to use characters in type names that wouldn't be
820 # type() allows you to use characters in type names that wouldn't be
806 # recognized as Python symbols in source code. We abuse that to add
821 # recognized as Python symbols in source code. We abuse that to add
807 # rich information about our constructed repo.
822 # rich information about our constructed repo.
808 name = pycompat.sysstr(
823 name = pycompat.sysstr(
809 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
824 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
810 )
825 )
811
826
812 cls = type(name, tuple(bases), {})
827 cls = type(name, tuple(bases), {})
813
828
814 return cls(
829 return cls(
815 baseui=baseui,
830 baseui=baseui,
816 ui=ui,
831 ui=ui,
817 origroot=path,
832 origroot=path,
818 wdirvfs=wdirvfs,
833 wdirvfs=wdirvfs,
819 hgvfs=hgvfs,
834 hgvfs=hgvfs,
820 requirements=requirements,
835 requirements=requirements,
821 supportedrequirements=supportedrequirements,
836 supportedrequirements=supportedrequirements,
822 sharedpath=storebasepath,
837 sharedpath=storebasepath,
823 store=store,
838 store=store,
824 cachevfs=cachevfs,
839 cachevfs=cachevfs,
825 wcachevfs=wcachevfs,
840 wcachevfs=wcachevfs,
826 features=features,
841 features=features,
827 intents=intents,
842 intents=intents,
828 )
843 )
829
844
830
845
831 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
846 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
832 """Load hgrc files/content into a ui instance.
847 """Load hgrc files/content into a ui instance.
833
848
834 This is called during repository opening to load any additional
849 This is called during repository opening to load any additional
835 config files or settings relevant to the current repository.
850 config files or settings relevant to the current repository.
836
851
837 Returns a bool indicating whether any additional configs were loaded.
852 Returns a bool indicating whether any additional configs were loaded.
838
853
839 Extensions should monkeypatch this function to modify how per-repo
854 Extensions should monkeypatch this function to modify how per-repo
840 configs are loaded. For example, an extension may wish to pull in
855 configs are loaded. For example, an extension may wish to pull in
841 configs from alternate files or sources.
856 configs from alternate files or sources.
842
857
843 sharedvfs is vfs object pointing to source repo if the current one is a
858 sharedvfs is vfs object pointing to source repo if the current one is a
844 shared one
859 shared one
845 """
860 """
846 if not rcutil.use_repo_hgrc():
861 if not rcutil.use_repo_hgrc():
847 return False
862 return False
848
863
849 ret = False
864 ret = False
850 # first load config from shared source if we has to
865 # first load config from shared source if we has to
851 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
866 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
852 try:
867 try:
853 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
868 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
854 ret = True
869 ret = True
855 except IOError:
870 except IOError:
856 pass
871 pass
857
872
858 try:
873 try:
859 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
874 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
860 ret = True
875 ret = True
861 except IOError:
876 except IOError:
862 pass
877 pass
863
878
864 try:
879 try:
865 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
880 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
866 ret = True
881 ret = True
867 except IOError:
882 except IOError:
868 pass
883 pass
869
884
870 return ret
885 return ret
871
886
872
887
873 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
888 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
874 """Perform additional actions after .hg/hgrc is loaded.
889 """Perform additional actions after .hg/hgrc is loaded.
875
890
876 This function is called during repository loading immediately after
891 This function is called during repository loading immediately after
877 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
892 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
878
893
879 The function can be used to validate configs, automatically add
894 The function can be used to validate configs, automatically add
880 options (including extensions) based on requirements, etc.
895 options (including extensions) based on requirements, etc.
881 """
896 """
882
897
883 # Map of requirements to list of extensions to load automatically when
898 # Map of requirements to list of extensions to load automatically when
884 # requirement is present.
899 # requirement is present.
885 autoextensions = {
900 autoextensions = {
886 b'git': [b'git'],
901 b'git': [b'git'],
887 b'largefiles': [b'largefiles'],
902 b'largefiles': [b'largefiles'],
888 b'lfs': [b'lfs'],
903 b'lfs': [b'lfs'],
889 }
904 }
890
905
891 for requirement, names in sorted(autoextensions.items()):
906 for requirement, names in sorted(autoextensions.items()):
892 if requirement not in requirements:
907 if requirement not in requirements:
893 continue
908 continue
894
909
895 for name in names:
910 for name in names:
896 if not ui.hasconfig(b'extensions', name):
911 if not ui.hasconfig(b'extensions', name):
897 ui.setconfig(b'extensions', name, b'', source=b'autoload')
912 ui.setconfig(b'extensions', name, b'', source=b'autoload')
898
913
899
914
900 def gathersupportedrequirements(ui):
915 def gathersupportedrequirements(ui):
901 """Determine the complete set of recognized requirements."""
916 """Determine the complete set of recognized requirements."""
902 # Start with all requirements supported by this file.
917 # Start with all requirements supported by this file.
903 supported = set(localrepository._basesupported)
918 supported = set(localrepository._basesupported)
904
919
905 if dirstate.SUPPORTS_DIRSTATE_V2:
920 if dirstate.SUPPORTS_DIRSTATE_V2:
906 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
921 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
907
922
908 # Execute ``featuresetupfuncs`` entries if they belong to an extension
923 # Execute ``featuresetupfuncs`` entries if they belong to an extension
909 # relevant to this ui instance.
924 # relevant to this ui instance.
910 modules = {m.__name__ for n, m in extensions.extensions(ui)}
925 modules = {m.__name__ for n, m in extensions.extensions(ui)}
911
926
912 for fn in featuresetupfuncs:
927 for fn in featuresetupfuncs:
913 if fn.__module__ in modules:
928 if fn.__module__ in modules:
914 fn(ui, supported)
929 fn(ui, supported)
915
930
916 # Add derived requirements from registered compression engines.
931 # Add derived requirements from registered compression engines.
917 for name in util.compengines:
932 for name in util.compengines:
918 engine = util.compengines[name]
933 engine = util.compengines[name]
919 if engine.available() and engine.revlogheader():
934 if engine.available() and engine.revlogheader():
920 supported.add(b'exp-compression-%s' % name)
935 supported.add(b'exp-compression-%s' % name)
921 if engine.name() == b'zstd':
936 if engine.name() == b'zstd':
922 supported.add(b'revlog-compression-zstd')
937 supported.add(b'revlog-compression-zstd')
923
938
924 return supported
939 return supported
925
940
926
941
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    (a subclass of ``error.RepoError``) if there exists any requirement in
    that set that currently loaded code doesn't recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


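# Illustrative failure mode (sketch; b'exp-future-format' is a made-up
# requirement name):
#
#     ensurerequirementsrecognized({b'exp-future-format'}, {b'store'})
#
# raises RequirementError: "repository requires features unknown to this
# Mercurial: exp-future-format", with a hint pointing at the wiki page.

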
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


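# Sketch of the monkeypatching hook mentioned in the docstring above
# (illustrative; ``exp-myformat`` and the wrapper are hypothetical, but
# ``extensions.wrapfunction`` is the standard wrapping helper):
#
#     def _compatcheck(orig, ui, requirements):
#         orig(ui, requirements)
#         if b'exp-myformat' in requirements and not ui.configbool(
#             b'experimental', b'myformat'
#         ):
#             raise error.RepoError(_(b'exp-myformat requires '
#                                     b'experimental.myformat=yes'))
#
#     extensions.wrapfunction(
#         localrepo, 'ensurerequirementscompatible', _compatcheck
#     )

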
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


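# The store selection above, summarized (comment added for clarity):
#
#     store + fncache requirements -> fncachestore (dotencode additionally
#                                     enables encoding of paths starting
#                                     with '.' or space)
#     store only                   -> encodedstore
#     neither                      -> basicstore (oldest, unencoded layout)

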
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


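# Illustrative sketch: for a typical modern repository whose requirements
# include b'revlogv1' and b'generaldelta', the dict returned above contains
# at least:
#
#     {b'revlogv1': True, b'generaldelta': True, ...}
#
# plus the delta, sparse-read and compression options resolved by
# resolverevlogstorevfsoptions() below.

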
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

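    # Illustrative parsing above (comment only): splitting on b'-' at most
    # twice keeps any dashes inside the engine name intact, e.g.
    #
    #     b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    #     b'exp-compression-some-engine'.split(b'-', 2)[2] == b'some-engine'
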
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


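# Simplified sketch of how the factories above are consumed (illustrative
# only; the real ``makelocalrepository()`` passes many more keyword
# arguments to each factory):
#
#     bases = []
#     for iface, fn in REPO_INTERFACES:
#         typ = fn()(requirements=requirements, features=features)
#         bases.append(typ)
#     cls = type('derivedrepo', tuple(bases), {})
#
# The extra ``fn()`` indirection resolves the module-level function at call
# time, so wrapping ``makemain``/``makefilestorage`` keeps working.

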
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that
           we know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter
        # value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

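    # Illustrative walk of the loop above (comment only): for a nested path
    # b'sub/deep/file' where ctx.substate contains b'sub', the candidate
    # prefixes are tried longest-first:
    #
    #     b'sub/deep/file' -> not in substate, pop
    #     b'sub/deep'      -> not in substate, pop
    #     b'sub'           -> in substate; delegate to
    #                         ctx.sub(b'sub').checknested(b'deep/file')
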
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

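    # Illustrative non-recursive behavior (comment only), matching the
    # docstring above:
    #
    #     served = repo.filtered(b'served')
    #     visible = served.filtered(b'visible')
    #
    # ``visible`` uses the "visible" view of the *unfiltered* repo; the
    # "served" filtering of the intermediate object is not stacked on top.
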
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a
        # race condition here:
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) an outside transaction updates the bookmark file, referring to
        #    content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the
        # data in memory and will reuse it if the underlying files did not
        # change. Not parsing the same data multiple times helps
        # performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for
        #    book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        use_dirstate_v2 = v2_req in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

1760 def narrowmatch(self, match=None, includeexact=False):
1775 def narrowmatch(self, match=None, includeexact=False):
1761 """matcher corresponding the the repo's narrowspec
1776 """matcher corresponding the the repo's narrowspec
1762
1777
1763 If `match` is given, then that will be intersected with the narrow
1778 If `match` is given, then that will be intersected with the narrow
1764 matcher.
1779 matcher.
1765
1780
1766 If `includeexact` is True, then any exact matches from `match` will
1781 If `includeexact` is True, then any exact matches from `match` will
1767 be included even if they're outside the narrowspec.
1782 be included even if they're outside the narrowspec.
1768 """
1783 """
1769 if match:
1784 if match:
1770 if includeexact and not self._narrowmatch.always():
1785 if includeexact and not self._narrowmatch.always():
1771 # do not exclude explicitly-specified paths so that they can
1786 # do not exclude explicitly-specified paths so that they can
1772 # be warned later on
1787 # be warned later on
1773 em = matchmod.exact(match.files())
1788 em = matchmod.exact(match.files())
1774 nm = matchmod.unionmatcher([self._narrowmatch, em])
1789 nm = matchmod.unionmatcher([self._narrowmatch, em])
1775 return matchmod.intersectmatchers(match, nm)
1790 return matchmod.intersectmatchers(match, nm)
1776 return matchmod.intersectmatchers(match, self._narrowmatch)
1791 return matchmod.intersectmatchers(match, self._narrowmatch)
1777 return self._narrowmatch
1792 return self._narrowmatch
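
    # Illustrative sketch (not part of the upstream file): matchers returned
    # by narrowmatch() are callable, so a caller can probe single paths:
    #
    #     m = repo.narrowmatch()
    #     if m(b'some/tracked/path'):
    #         ...  # path is inside the narrowspec (or the repo is not narrow)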

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
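
    # Illustrative sketch (not part of the upstream file): the lookups
    # accepted by __getitem__ above include, for a localrepository `repo`:
    #
    #     repo[42]         # changectx by integer revision
    #     repo[node]       # by 20-byte binary node
    #     repo[b'tip']     # by the special b'tip' symbol
    #     repo[b'.']       # working directory parent (fast-pathed)
    #     repo[None]       # workingctx for the working directory
    #     repo[0:5]        # list of changectx, filtered revisions skipped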

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
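
    # Illustrative sketch (not part of the upstream file): the %-escapes
    # handled by revsetlang.formatspec allow safe argument interpolation:
    #
    #     repo.revs(b'heads(%ld)', [1, 2, 3])   # %ld: list of int revisions
    #     repo.revs(b'branch(%s)', branchname)  # %s: bytes string
    #     repo.revs(b'%d::', baserev)           # %d: single int revision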

    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
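
    # Illustrative sketch (not part of the upstream file): iterating changectx
    # instances instead of bare revision numbers:
    #
    #     for ctx in repo.set(b'draft()'):
    #         ui.write(b'%s %s\n' % (ctx.hex(), ctx.description()))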

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
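
    # Illustrative sketch (not part of the upstream file; the alias name
    # `myalias` is made up): expanding user revset aliases while overriding
    # one of them locally:
    #
    #     revs = repo.anyrevs(
    #         [b'myalias', b'.'],
    #         user=True,
    #         localalias={b'myalias': b'heads(default)'},
    #     )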

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
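
    # Illustrative sketch (not part of the upstream file): an extension firing
    # a custom hook (the hook name `myextension-done` is made up):
    #
    #     repo.hook(b'myextension-done', throw=True, node=hex(newnode))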

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
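
    # Illustrative sketch (not part of the upstream file): typical reads of
    # the tag caches built above (the tag name `v1.0` is made up):
    #
    #     repo.tags()            # {tagname: node}, always includes b'tip'
    #     repo.tagtype(b'v1.0')  # b'global', b'local', or None
    #     repo.tagslist()        # [(tagname, node)] ordered by revision
    #     repo.nodetags(node)    # sorted tag names attached to `node`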

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
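
    # Illustrative sketch (not part of the upstream file): combining the two
    # branch APIs above:
    #
    #     heads = repo.branchmap()[b'default']   # heads by increasing rev
    #     tip = repo.branchtip(b'default')       # raises if branch missing
    #     tip = repo.branchtip(b'maybe', ignoremissing=True)  # None if missing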

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
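
    # Illustrative sketch (not part of the upstream file): the b'encode' and
    # b'decode' filters loaded above come from hgrc sections of the same name.
    # The command-filter fallback (procutil.filter) understands "pipe:" and
    # "tempfile:" command prefixes, e.g.:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip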

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

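    # Illustrative sketch, not part of the upstream module: the weakref
    # pattern used by _buildcacheupdater() shown in isolation. Holding only a
    # weak reference to the repository keeps the transaction's callback from
    # creating a repo <-> transaction reference cycle; the callback becomes a
    # no-op if the repository has already been garbage-collected.
    # `fictionalrepo` is a stand-in name for any repository object.
    #
    #     import weakref
    #
    #     def make_callback(fictionalrepo):
    #         reporef = weakref.ref(fictionalrepo)
    #
    #         def callback(tr):
    #             repo = reporef()
    #             if repo is None:  # repo was collected; nothing to update
    #                 return
    #             repo.updatecaches(tr)
    #
    #         return callback
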
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is made after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

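    # Illustrative sketch, not part of the upstream module: selectively
    # warming caches after a transaction, using the same constants the method
    # above consumes. `somerepo` is a stand-in for an open repository.
    #
    #     from mercurial.interfaces import repository
    #
    #     wanted = {
    #         repository.CACHE_BRANCHMAP_SERVED,
    #         repository.CACHE_TAGS_DEFAULT,
    #     }
    #     somerepo.updatecaches(caches=wanted)
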
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failures, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

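    # Illustrative sketch, not part of the upstream module: deferring work
    # until every lock is released, the same way commit() schedules its
    # 'commit' hook further down. The callback receives a success flag;
    # `somerepo` is a stand-in for an open repository.
    #
    #     def announce(unused_success):
    #         somerepo.ui.status(b'all repository locks released\n')
    #
    #     somerepo._afterlock(announce)  # runs immediately if no lock is held
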
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

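    # Illustrative sketch, not part of the upstream module: the required
    # acquisition order, wlock before lock, exactly as commit() does further
    # down. Acquiring them in the opposite order trips the devel warning
    # emitted by wlock() and risks deadlock against other processes.
    # `somerepo` is a stand-in for an open repository.
    #
    #     with somerepo.wlock(), somerepo.lock():
    #         ...  # mutate the working copy and the store safely
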
    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (e.g. histedit):
                # the temporary commit may have been stripped before the hook
                # is released
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

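    # Illustrative sketch, not part of the upstream module: a minimal
    # programmatic commit, assuming `somerepo` is an open repository whose
    # working directory has pending changes. Returns the new changeset's
    # node, or None if there was nothing to commit.
    #
    #     node = somerepo.commit(
    #         text=b'example: fix a typo',
    #         user=b'Jane Doe <jane@example.org>',
    #     )
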
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

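    # Illustrative sketch, not part of the upstream module: comparing the
    # working copy against its first parent (the defaults), then against tip.
    # The result exposes lists such as .modified, .added and .removed;
    # `somerepo` is a stand-in for an open repository.
    #
    #     st = somerepo.status()
    #     for path in st.modified:
    #         somerepo.ui.write(b'M %s\n' % path)
    #
    #     st = somerepo.status(node1=b'tip')  # tip vs. working copy
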
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

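    # Illustrative sketch, not part of the upstream module: how an extension
    # might hook the post-dirstate-status fixup point. Per the contract
    # above, the callback must be re-registered before each status run that
    # involves the dirstate; `somerepo` is a stand-in for an open repository.
    #
    #     def poststatus(wctx, status):
    #         wctx.repo().ui.debug(
    #             b'%d modified files\n' % len(status.modified)
    #         )
    #
    #     somerepo.addpostdsstatus(poststatus)
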
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        """For each (top, bottom) pair, walk first parents from top towards
        bottom and collect the nodes whose distance from top is a power of
        two (1, 2, 4, 8, ...). Returns one such list per pair; this samples
        a chain at exponentially increasing intervals for the legacy
        discovery protocol."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

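    # Illustrative sketch, not part of the upstream module: the sampling
    # schedule of between() modelled on plain integers instead of nodes. For
    # a chain of length 10 it yields the ancestors at distances 1, 2, 4, 8.
    #
    #     def sampled_distances(chainlength):
    #         distances, i, f = [], 0, 1
    #         while i < chainlength:
    #             if i == f:
    #                 distances.append(i)
    #                 f *= 2
    #             i += 1
    #         return distances
    #
    #     assert sampled_distances(10) == [1, 2, 4, 8]
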
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose callbacks are invoked, before
        changesets are pushed, with a pushop carrying the repo, remote and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

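    # Illustrative sketch, not part of the upstream module: moving a bookmark
    # through the pushkey machinery. For the 'bookmarks' namespace, old and
    # new are hex nodes (an empty old means the bookmark is being created);
    # `somerepo` and `somenode` are stand-ins for an open repository and a
    # changeset node it already contains.
    #
    #     ok = somerepo.pushkey(b'bookmarks', b'feature-x', b'', hex(somenode))
    #     if not ok:
    #         somerepo.ui.warn(b'bookmark was not updated\n')
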
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        with self.vfs(b'last-message.txt', b'wb') as fp:
            fp.write(text)
        return self.pathto(fp.name[len(self.root) + 1 :])

3425 def register_wanted_sidedata(self, category):
3440 def register_wanted_sidedata(self, category):
3426 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3441 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3427 # Only revlogv2 repos can want sidedata.
3442 # Only revlogv2 repos can want sidedata.
3428 return
3443 return
3429 self._wanted_sidedata.add(pycompat.bytestr(category))
3444 self._wanted_sidedata.add(pycompat.bytestr(category))
3430
3445
3431 def register_sidedata_computer(
3446 def register_sidedata_computer(
3432 self, kind, category, keys, computer, flags, replace=False
3447 self, kind, category, keys, computer, flags, replace=False
3433 ):
3448 ):
3434 if kind not in revlogconst.ALL_KINDS:
3449 if kind not in revlogconst.ALL_KINDS:
3435 msg = _(b"unexpected revlog kind '%s'.")
3450 msg = _(b"unexpected revlog kind '%s'.")
3436 raise error.ProgrammingError(msg % kind)
3451 raise error.ProgrammingError(msg % kind)
3437 category = pycompat.bytestr(category)
3452 category = pycompat.bytestr(category)
3438 already_registered = category in self._sidedata_computers.get(kind, [])
3453 already_registered = category in self._sidedata_computers.get(kind, [])
3439 if already_registered and not replace:
3454 if already_registered and not replace:
3440 msg = _(
3455 msg = _(
3441 b"cannot register a sidedata computer twice for category '%s'."
3456 b"cannot register a sidedata computer twice for category '%s'."
3442 )
3457 )
3443 raise error.ProgrammingError(msg % category)
3458 raise error.ProgrammingError(msg % category)
3444 if replace and not already_registered:
3459 if replace and not already_registered:
3445 msg = _(
3460 msg = _(
3446 b"cannot replace a sidedata computer that isn't registered "
3461 b"cannot replace a sidedata computer that isn't registered "
3447 b"for category '%s'."
3462 b"for category '%s'."
3448 )
3463 )
3449 raise error.ProgrammingError(msg % category)
3464 raise error.ProgrammingError(msg % category)
3450 self._sidedata_computers.setdefault(kind, {})
3465 self._sidedata_computers.setdefault(kind, {})
3451 self._sidedata_computers[kind][category] = (keys, computer, flags)
3466 self._sidedata_computers[kind][category] = (keys, computer, flags)
3452
3467
3453
3468
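# A minimal sketch (not called anywhere in this module) of how an
# extension might use the two registration hooks above, modelled on the
# sidedata test extensions shipped with Mercurial. The function names
# and payload are hypothetical; the computer signature
# (repo, revlog, rev, sidedata) -> (sidedata, (flags_to_add,
# flags_to_remove)) is assumed from the in-tree computers.
def _example_sidedata_setup(repo):
    def compute_example(repo, revlog, rev, sidedata):
        # Record the length of the revision's text under a test key and
        # request no flag changes on the revision (the (0, 0) pair).
        sidedata = sidedata.copy()
        text = revlog.revision(rev)
        sidedata[sidedatamod.SD_TEST1] = b'%d' % len(text)
        return sidedata, (0, 0)

    repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
    repo.register_sidedata_computer(
        revlogconst.KIND_CHANGELOG,
        sidedatamod.SD_TEST1,
        (sidedatamod.SD_TEST1,),
        compute_example,
        0,
    )

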
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError as exc:  # journal file does not yet exist
                if exc.errno != errno.ENOENT:
                    raise

    return a


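# A note on the tryunlink() above, as a sketch with plain os calls on
# hypothetical scratch files: POSIX rename() is specified to do nothing
# when src and dest are hard links to the same file, so without the
# unlink a stale journal could survive under both names.
#
#     os.link('journal', 'undo')    # both names now point at one inode
#     os.rename('journal', 'undo')  # no-op: both names still exist

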
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


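# For instance (a sketch; the resulting value depends on the user's
# configuration, b'revlogv1' being the default backend):
#
#     defaultcreateopts(ui, {b'narrowfiles': True})
#     # -> {b'narrowfiles': True, b'backend': b'revlogv1'}

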
def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
        if ui.configbool(b'format', b'sparse-revlog'):
            requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'exp-dirstate-v2'):
        if dirstate.SUPPORTS_DIRSTATE_V2:
            requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
        else:
            raise error.Abort(
                _(
                    b"dirstate v2 format requested by config "
                    b"but not supported (requires Rust extensions)"
                )
            )

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


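# A minimal sketch, assuming the usual extension-wrapping pattern, of
# how an extension could add its own requirement through the hook
# described in the docstring above. ``exp-myfeature`` and the function
# names are hypothetical.
def _example_wrap_requirements(ui):
    from mercurial import localrepo  # how an extension would import us

    def wrapped(orig, ui, createopts):
        requirements = orig(ui, createopts)
        requirements.add(b'exp-myfeature')
        return requirements

    extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)

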
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about them."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


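# A minimal sketch of the wrapping the docstring above describes: an
# extension that understands a hypothetical b'exp-myoption' creation
# option removes it from the "unknown" dict before returning, so
# repository creation is allowed to proceed.
def _example_filter_wrapper(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'exp-myoption', None)  # handled by our extension
    return unknown

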
def createrepository(ui, path, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
                     (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the shared source's store.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


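# Typical use, as a sketch (the path is a made-up example):
#
#     createrepository(ui, b'/path/to/newrepo', createopts={b'lfs': True})
#
# This writes the ``.hg`` layout and requirements files but returns
# nothing; callers open the result through instance().

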
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
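

# After poisoning, only close() remains callable (a sketch):
#
#     poisonrepository(repo)
#     repo.close()  # still allowed, does nothing
#     repo.root     # raises error.ProgrammingError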