cleanup: return directly instead of assigning variable

Author: Manuel Jacob
Changeset: r50208:5c01ca5f (branch: default)
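The whole of this changeset is the `_readrequires` hunk in the diff below: a temporary that exists only to be returned is removed by returning the expression directly from each branch. Here is a self-contained sketch of the before/after shapes; the `FakeVfs` stand-in and the `readrequires_*` names are invented for illustration (the real function receives a Mercurial vfs object):

class FakeVfs:
    """Minimal stand-in for a Mercurial vfs, invented for this example."""

    def __init__(self, files):
        self._files = files

    def read(self, name):
        # Mirror vfs.read(): return the file's bytes, or raise if missing.
        if name not in self._files:
            raise FileNotFoundError(name)
        return self._files[name]


def readrequires_before(vfs, allowmissing):
    # Old shape: both paths assign a temporary; one trailing return.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except FileNotFoundError:
        if not allowmissing:
            raise
        requirements = set()
    return requirements


def readrequires_after(vfs, allowmissing):
    # New shape: each path returns directly; the temporary and the
    # trailing return are gone, and behavior is unchanged.
    try:
        return set(vfs.read(b'requires').splitlines())
    except FileNotFoundError:
        if not allowmissing:
            raise
        return set()


vfs = FakeVfs({b'requires': b'store\nfncache\ndotencode'})
assert readrequires_before(vfs, True) == readrequires_after(vfs, True)
assert readrequires_after(FakeVfs({}), True) == set()

Besides dropping a line, the direct return makes it obvious at a glance that every path out of the try/except produces a value.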
@@ -1,3937 +1,3936 @@
 # localrepo.py - read/write repository class for mercurial
 # coding: utf-8
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import functools
 import os
 import random
 import sys
 import time
 import weakref

 from concurrent import futures
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     sha1nodeconstants,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revlog,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
     wireprototypes,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     hashutil,
     procutil,
     stringutil,
     urlutil,
 )

 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
     sidedata as sidedatamod,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()


 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered"""

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)


 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""

     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)


 class storecache(_basefilecache):
     """filecache for files in the store"""

     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))

     def join(self, obj, fname):
         return obj.sjoin(fname)


 class changelogcache(storecache):
     """filecache for the changelog"""

     def __init__(self):
         super(changelogcache, self).__init__()
         _cachedfiles.add((b'00changelog.i', b''))
         _cachedfiles.add((b'00changelog.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00changelog.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00changelog.n'))
         return paths


 class manifestlogcache(storecache):
     """filecache for the manifestlog"""

     def __init__(self):
         super(manifestlogcache, self).__init__()
         _cachedfiles.add((b'00manifest.i', b''))
         _cachedfiles.add((b'00manifest.n', b''))

     def tracked_paths(self, obj):
         paths = [self.join(obj, b'00manifest.i')]
         if obj.store.opener.options.get(b'persistent-nodemap', False):
             paths.append(self.join(obj, b'00manifest.n'))
         return paths


 class mixedrepostorecache(_basefilecache):
     """filecache for a mix files in .hg/store and outside"""

     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)


 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True


 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)


 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())


 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""

     @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)

     return wrapper


 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})


 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor:
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )

         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True


 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()

         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui

         if repo._wanted_sidedata:
             formatted = bundle2.format_remote_wanted_sidedata(repo)
             caps.add(b'exp-wanted-sidedata=' + formatted)

         self._caps = repo._restrictcapabilities(caps)

     # Begin of _basepeer interface.

     def url(self):
         return self._repo.url()

     def local(self):
         return self._repo

     def peer(self):
         return self

     def canpush(self):
         return True

     def close(self):
         self._repo.close()

     # End of _basepeer interface.

     # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

     def capabilities(self):
         return self._caps

     def clonebundles(self):
         return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )

     def getbundle(
         self,
         source,
         heads=None,
         common=None,
         bundlecaps=None,
         remote_sidedata=None,
         **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             remote_sidedata=remote_sidedata,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def lookup(self, key):
         return self._repo.lookup(key)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.


 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     """peer extension which implements legacy methods too; used for tests with
     restricted capabilities"""

     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)

     # Begin of baselegacywirecommands interface.

     def between(self, pairs):
         return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     # End of baselegacywirecommands interface.


 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()


 def _getsharedvfs(hgvfs, requirements):
     """returns the vfs object pointing to root of shared source
     repo for a shared repository

     hgvfs is vfs pointing at .hg/ of current repo (shared one)
     requirements is a set of requirements of current repo (shared one)
     """
     # The ``shared`` or ``relshared`` requirements indicate the
     # store lives in the path contained in the ``.hg/sharedpath`` file.
     # This is an absolute path for ``shared`` and relative to
     # ``.hg/`` for ``relshared``.
     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
     if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
         sharedpath = util.normpath(hgvfs.join(sharedpath))

     sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

     if not sharedvfs.exists():
         raise error.RepoError(
             _(b'.hg/sharedpath points to nonexistent directory %s')
             % sharedvfs.base
         )
     return sharedvfs


 def _readrequires(vfs, allowmissing):
     """reads the require file present at root of this vfs
     and return a set of requirements

     If allowmissing is True, we suppress FileNotFoundError if raised"""
     # requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     try:
-        requirements = set(vfs.read(b'requires').splitlines())
+        return set(vfs.read(b'requires').splitlines())
     except FileNotFoundError:
         if not allowmissing:
             raise
-        requirements = set()
-    return requirements
+        return set()


534 def makelocalrepository(baseui, path, intents=None):
533 def makelocalrepository(baseui, path, intents=None):
535 """Create a local repository object.
534 """Create a local repository object.
536
535
537 Given arguments needed to construct a local repository, this function
536 Given arguments needed to construct a local repository, this function
538 performs various early repository loading functionality (such as
537 performs various early repository loading functionality (such as
539 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
540 the repository can be opened, derives a type suitable for representing
539 the repository can be opened, derives a type suitable for representing
541 that repository, and returns an instance of it.
540 that repository, and returns an instance of it.
542
541
543 The returned object conforms to the ``repository.completelocalrepository``
542 The returned object conforms to the ``repository.completelocalrepository``
544 interface.
543 interface.
545
544
546 The repository type is derived by calling a series of factory functions
545 The repository type is derived by calling a series of factory functions
547 for each aspect/interface of the final repository. These are defined by
546 for each aspect/interface of the final repository. These are defined by
548 ``REPO_INTERFACES``.
547 ``REPO_INTERFACES``.
549
548
550 Each factory function is called to produce a type implementing a specific
549 Each factory function is called to produce a type implementing a specific
551 interface. The cumulative list of returned types will be combined into a
550 interface. The cumulative list of returned types will be combined into a
552 new type and that type will be instantiated to represent the local
551 new type and that type will be instantiated to represent the local
553 repository.
552 repository.
554
553
555 The factory functions each receive various state that may be consulted
554 The factory functions each receive various state that may be consulted
556 as part of deriving a type.
555 as part of deriving a type.
557
556
558 Extensions should wrap these factory functions to customize repository type
557 Extensions should wrap these factory functions to customize repository type
559 creation. Note that an extension's wrapped function may be called even if
558 creation. Note that an extension's wrapped function may be called even if
560 that extension is not loaded for the repo being constructed. Extensions
559 that extension is not loaded for the repo being constructed. Extensions
561 should check if their ``__name__`` appears in the
560 should check if their ``__name__`` appears in the
562 ``extensionmodulenames`` set passed to the factory function and no-op if
561 ``extensionmodulenames`` set passed to the factory function and no-op if
563 not.
562 not.
564 """
563 """
565 ui = baseui.copy()
564 ui = baseui.copy()
566 # Prevent copying repo configuration.
565 # Prevent copying repo configuration.
567 ui.copy = baseui.copy
566 ui.copy = baseui.copy
568
567
569 # Working directory VFS rooted at repository root.
568 # Working directory VFS rooted at repository root.
570 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
571
570
572 # Main VFS for .hg/ directory.
571 # Main VFS for .hg/ directory.
573 hgpath = wdirvfs.join(b'.hg')
572 hgpath = wdirvfs.join(b'.hg')
574 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
575 # Whether this repository is shared one or not
574 # Whether this repository is shared one or not
576 shared = False
575 shared = False
577 # If this repository is shared, vfs pointing to shared repo
576 # If this repository is shared, vfs pointing to shared repo
578 sharedvfs = None
577 sharedvfs = None
579
578
580 # The .hg/ path should exist and should be a directory. All other
579 # The .hg/ path should exist and should be a directory. All other
581 # cases are errors.
580 # cases are errors.
582 if not hgvfs.isdir():
581 if not hgvfs.isdir():
583 try:
582 try:
584 hgvfs.stat()
583 hgvfs.stat()
585 except FileNotFoundError:
584 except FileNotFoundError:
586 pass
585 pass
587 except ValueError as e:
586 except ValueError as e:
588 # Can be raised on Python 3.8 when path is invalid.
587 # Can be raised on Python 3.8 when path is invalid.
589 raise error.Abort(
588 raise error.Abort(
590 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
591 )
590 )
592
591
593 raise error.RepoError(_(b'repository %s not found') % path)
592 raise error.RepoError(_(b'repository %s not found') % path)
594
593
595 requirements = _readrequires(hgvfs, True)
594 requirements = _readrequires(hgvfs, True)
596 shared = (
595 shared = (
597 requirementsmod.SHARED_REQUIREMENT in requirements
596 requirementsmod.SHARED_REQUIREMENT in requirements
598 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
599 )
598 )
600 storevfs = None
599 storevfs = None
601 if shared:
600 if shared:
602 # This is a shared repo
601 # This is a shared repo
603 sharedvfs = _getsharedvfs(hgvfs, requirements)
602 sharedvfs = _getsharedvfs(hgvfs, requirements)
604 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
605 else:
604 else:
606 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
607
606
608 # if .hg/requires contains the sharesafe requirement, it means
607 # if .hg/requires contains the sharesafe requirement, it means
609 # there exists a `.hg/store/requires` too and we should read it
608 # there exists a `.hg/store/requires` too and we should read it
610 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
609 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
611 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
612 # is not present, refer checkrequirementscompat() for that
611 # is not present, refer checkrequirementscompat() for that
613 #
612 #
614 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
615 # repository was shared the old way. We check the share source .hg/requires
614 # repository was shared the old way. We check the share source .hg/requires
616 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
617 # to be reshared
616 # to be reshared
618 hint = _(b"see `hg help config.format.use-share-safe` for more information")
617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
619 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
620
619
621 if (
620 if (
622 shared
621 shared
623 and requirementsmod.SHARESAFE_REQUIREMENT
622 and requirementsmod.SHARESAFE_REQUIREMENT
624 not in _readrequires(sharedvfs, True)
623 not in _readrequires(sharedvfs, True)
625 ):
624 ):
626 mismatch_warn = ui.configbool(
625 mismatch_warn = ui.configbool(
627 b'share', b'safe-mismatch.source-not-safe.warn'
626 b'share', b'safe-mismatch.source-not-safe.warn'
628 )
627 )
629 mismatch_config = ui.config(
628 mismatch_config = ui.config(
630 b'share', b'safe-mismatch.source-not-safe'
629 b'share', b'safe-mismatch.source-not-safe'
631 )
630 )
632 if mismatch_config in (
631 if mismatch_config in (
633 b'downgrade-allow',
632 b'downgrade-allow',
634 b'allow',
633 b'allow',
635 b'downgrade-abort',
634 b'downgrade-abort',
636 ):
635 ):
637 # prevent cyclic import localrepo -> upgrade -> localrepo
636 # prevent cyclic import localrepo -> upgrade -> localrepo
638 from . import upgrade
637 from . import upgrade
639
638
640 upgrade.downgrade_share_to_non_safe(
639 upgrade.downgrade_share_to_non_safe(
641 ui,
640 ui,
642 hgvfs,
641 hgvfs,
643 sharedvfs,
642 sharedvfs,
644 requirements,
643 requirements,
645 mismatch_config,
644 mismatch_config,
646 mismatch_warn,
645 mismatch_warn,
647 )
646 )
648 elif mismatch_config == b'abort':
647 elif mismatch_config == b'abort':
649 raise error.Abort(
648 raise error.Abort(
650 _(b"share source does not support share-safe requirement"),
649 _(b"share source does not support share-safe requirement"),
651 hint=hint,
650 hint=hint,
652 )
651 )
653 else:
652 else:
654 raise error.Abort(
653 raise error.Abort(
655 _(
654 _(
656 b"share-safe mismatch with source.\nUnrecognized"
655 b"share-safe mismatch with source.\nUnrecognized"
657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
656 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 b" set."
657 b" set."
659 )
658 )
660 % mismatch_config,
659 % mismatch_config,
661 hint=hint,
660 hint=hint,
662 )
661 )
663 else:
662 else:
664 requirements |= _readrequires(storevfs, False)
663 requirements |= _readrequires(storevfs, False)
665 elif shared:
664 elif shared:
666 sourcerequires = _readrequires(sharedvfs, False)
665 sourcerequires = _readrequires(sharedvfs, False)
667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
666 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
667 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 mismatch_warn = ui.configbool(
668 mismatch_warn = ui.configbool(
670 b'share', b'safe-mismatch.source-safe.warn'
669 b'share', b'safe-mismatch.source-safe.warn'
671 )
670 )
672 if mismatch_config in (
671 if mismatch_config in (
673 b'upgrade-allow',
672 b'upgrade-allow',
674 b'allow',
673 b'allow',
675 b'upgrade-abort',
674 b'upgrade-abort',
676 ):
675 ):
677 # prevent cyclic import localrepo -> upgrade -> localrepo
676 # prevent cyclic import localrepo -> upgrade -> localrepo
678 from . import upgrade
677 from . import upgrade
679
678
680 upgrade.upgrade_share_to_safe(
679 upgrade.upgrade_share_to_safe(
681 ui,
680 ui,
682 hgvfs,
681 hgvfs,
683 storevfs,
682 storevfs,
684 requirements,
683 requirements,
685 mismatch_config,
684 mismatch_config,
686 mismatch_warn,
685 mismatch_warn,
687 )
686 )
688 elif mismatch_config == b'abort':
687 elif mismatch_config == b'abort':
689 raise error.Abort(
688 raise error.Abort(
690 _(
689 _(
691 b'version mismatch: source uses share-safe'
690 b'version mismatch: source uses share-safe'
692 b' functionality while the current share does not'
691 b' functionality while the current share does not'
693 ),
692 ),
694 hint=hint,
693 hint=hint,
695 )
694 )
696 else:
695 else:
697 raise error.Abort(
696 raise error.Abort(
698 _(
697 _(
699 b"share-safe mismatch with source.\nUnrecognized"
698 b"share-safe mismatch with source.\nUnrecognized"
700 b" value '%s' of `share.safe-mismatch.source-safe` set."
699 b" value '%s' of `share.safe-mismatch.source-safe` set."
701 )
700 )
702 % mismatch_config,
701 % mismatch_config,
703 hint=hint,
702 hint=hint,
704 )
703 )
705
704
706 # The .hg/hgrc file may load extensions or contain config options
705 # The .hg/hgrc file may load extensions or contain config options
707 # that influence repository construction. Attempt to load it and
706 # that influence repository construction. Attempt to load it and
708 # process any new extensions that it may have pulled in.
707 # process any new extensions that it may have pulled in.
709 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
708 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
710 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
709 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
711 extensions.loadall(ui)
710 extensions.loadall(ui)
712 extensions.populateui(ui)
711 extensions.populateui(ui)
713
712
714 # Set of module names of extensions loaded for this repository.
713 # Set of module names of extensions loaded for this repository.
715 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
714 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
716
715
717 supportedrequirements = gathersupportedrequirements(ui)
716 supportedrequirements = gathersupportedrequirements(ui)
718
717
719 # We first validate the requirements are known.
718 # We first validate the requirements are known.
720 ensurerequirementsrecognized(requirements, supportedrequirements)
719 ensurerequirementsrecognized(requirements, supportedrequirements)
721
720
722 # Then we validate that the known set is reasonable to use together.
721 # Then we validate that the known set is reasonable to use together.
723 ensurerequirementscompatible(ui, requirements)
722 ensurerequirementscompatible(ui, requirements)
724
723
725 # TODO there are unhandled edge cases related to opening repositories with
724 # TODO there are unhandled edge cases related to opening repositories with
726 # shared storage. If storage is shared, we should also test for requirements
725 # shared storage. If storage is shared, we should also test for requirements
727 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
726 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
728 # that repo, as that repo may load extensions needed to open it. This is a
727 # that repo, as that repo may load extensions needed to open it. This is a
729 # bit complicated because we don't want the other hgrc to overwrite settings
728 # bit complicated because we don't want the other hgrc to overwrite settings
730 # in this hgrc.
729 # in this hgrc.
731 #
730 #
732 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
731 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
733 # file when sharing repos. But if a requirement is added after the share is
732 # file when sharing repos. But if a requirement is added after the share is
734 # performed, thereby introducing a new requirement for the opener, we may
733 # performed, thereby introducing a new requirement for the opener, we may
735 # will not see that and could encounter a run-time error interacting with
734 # will not see that and could encounter a run-time error interacting with
736 # that shared store since it has an unknown-to-us requirement.
735 # that shared store since it has an unknown-to-us requirement.
737
736
738 # At this point, we know we should be capable of opening the repository.
737 # At this point, we know we should be capable of opening the repository.
739 # Now get on with doing that.
738 # Now get on with doing that.
740
739
741 features = set()
740 features = set()
742
741
743 # The "store" part of the repository holds versioned data. How it is
742 # The "store" part of the repository holds versioned data. How it is
744 # accessed is determined by various requirements. If `shared` or
743 # accessed is determined by various requirements. If `shared` or
745 # `relshared` requirements are present, this indicates current repository
744 # `relshared` requirements are present, this indicates current repository
746 # is a share and store exists in path mentioned in `.hg/sharedpath`
745 # is a share and store exists in path mentioned in `.hg/sharedpath`
747 if shared:
746 if shared:
748 storebasepath = sharedvfs.base
747 storebasepath = sharedvfs.base
749 cachepath = sharedvfs.join(b'cache')
748 cachepath = sharedvfs.join(b'cache')
750 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
749 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
751 else:
750 else:
752 storebasepath = hgvfs.base
751 storebasepath = hgvfs.base
753 cachepath = hgvfs.join(b'cache')
752 cachepath = hgvfs.join(b'cache')
754 wcachepath = hgvfs.join(b'wcache')
753 wcachepath = hgvfs.join(b'wcache')
755
754
756 # The store has changed over time and the exact layout is dictated by
755 # The store has changed over time and the exact layout is dictated by
757 # requirements. The store interface abstracts differences across all
756 # requirements. The store interface abstracts differences across all
758 # of them.
757 # of them.
759 store = makestore(
758 store = makestore(
760 requirements,
759 requirements,
761 storebasepath,
760 storebasepath,
762 lambda base: vfsmod.vfs(base, cacheaudited=True),
761 lambda base: vfsmod.vfs(base, cacheaudited=True),
763 )
762 )
764 hgvfs.createmode = store.createmode
763 hgvfs.createmode = store.createmode
765
764
766 storevfs = store.vfs
765 storevfs = store.vfs
767 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
766 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
768
767
769 if (
768 if (
770 requirementsmod.REVLOGV2_REQUIREMENT in requirements
769 requirementsmod.REVLOGV2_REQUIREMENT in requirements
771 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
770 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
772 ):
771 ):
773 features.add(repository.REPO_FEATURE_SIDE_DATA)
772 features.add(repository.REPO_FEATURE_SIDE_DATA)
774 # the revlogv2 docket introduced race condition that we need to fix
773 # the revlogv2 docket introduced race condition that we need to fix
775 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
774 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
776
775
777 # The cache vfs is used to manage cache files.
776 # The cache vfs is used to manage cache files.
778 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
777 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
779 cachevfs.createmode = store.createmode
778 cachevfs.createmode = store.createmode
780 # The cache vfs is used to manage cache files related to the working copy
779 # The cache vfs is used to manage cache files related to the working copy
781 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
780 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
782 wcachevfs.createmode = store.createmode
781 wcachevfs.createmode = store.createmode
783
782
784 # Now resolve the type for the repository object. We do this by repeatedly
783 # Now resolve the type for the repository object. We do this by repeatedly
785 # calling a factory function to produces types for specific aspects of the
784 # calling a factory function to produces types for specific aspects of the
786 # repo's operation. The aggregate returned types are used as base classes
785 # repo's operation. The aggregate returned types are used as base classes
787 # for a dynamically-derived type, which will represent our new repository.
786 # for a dynamically-derived type, which will represent our new repository.
788
787
789 bases = []
788 bases = []
790 extrastate = {}
789 extrastate = {}
791
790
792 for iface, fn in REPO_INTERFACES:
791 for iface, fn in REPO_INTERFACES:
793 # We pass all potentially useful state to give extensions tons of
792 # We pass all potentially useful state to give extensions tons of
794 # flexibility.
793 # flexibility.
795 typ = fn()(
794 typ = fn()(
796 ui=ui,
795 ui=ui,
797 intents=intents,
796 intents=intents,
798 requirements=requirements,
797 requirements=requirements,
799 features=features,
798 features=features,
800 wdirvfs=wdirvfs,
799 wdirvfs=wdirvfs,
801 hgvfs=hgvfs,
800 hgvfs=hgvfs,
802 store=store,
801 store=store,
803 storevfs=storevfs,
802 storevfs=storevfs,
804 storeoptions=storevfs.options,
803 storeoptions=storevfs.options,
805 cachevfs=cachevfs,
804 cachevfs=cachevfs,
806 wcachevfs=wcachevfs,
805 wcachevfs=wcachevfs,
807 extensionmodulenames=extensionmodulenames,
806 extensionmodulenames=extensionmodulenames,
808 extrastate=extrastate,
807 extrastate=extrastate,
809 baseclasses=bases,
808 baseclasses=bases,
810 )
809 )
811
810
812 if not isinstance(typ, type):
811 if not isinstance(typ, type):
813 raise error.ProgrammingError(
812 raise error.ProgrammingError(
814 b'unable to construct type for %s' % iface
813 b'unable to construct type for %s' % iface
815 )
814 )
816
815
817 bases.append(typ)
816 bases.append(typ)
818
817
819 # type() allows you to use characters in type names that wouldn't be
818 # type() allows you to use characters in type names that wouldn't be
820 # recognized as Python symbols in source code. We abuse that to add
819 # recognized as Python symbols in source code. We abuse that to add
821 # rich information about our constructed repo.
820 # rich information about our constructed repo.
822 name = pycompat.sysstr(
821 name = pycompat.sysstr(
823 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
822 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
824 )
823 )
825
824
826 cls = type(name, tuple(bases), {})
825 cls = type(name, tuple(bases), {})
827
826
828 return cls(
827 return cls(
829 baseui=baseui,
828 baseui=baseui,
830 ui=ui,
829 ui=ui,
831 origroot=path,
830 origroot=path,
832 wdirvfs=wdirvfs,
831 wdirvfs=wdirvfs,
833 hgvfs=hgvfs,
832 hgvfs=hgvfs,
834 requirements=requirements,
833 requirements=requirements,
835 supportedrequirements=supportedrequirements,
834 supportedrequirements=supportedrequirements,
836 sharedpath=storebasepath,
835 sharedpath=storebasepath,
837 store=store,
836 store=store,
838 cachevfs=cachevfs,
837 cachevfs=cachevfs,
839 wcachevfs=wcachevfs,
838 wcachevfs=wcachevfs,
840 features=features,
839 features=features,
841 intents=intents,
840 intents=intents,
842 )
841 )
843
842
844
843
845 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
844 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
846 """Load hgrc files/content into a ui instance.
845 """Load hgrc files/content into a ui instance.
847
846
848 This is called during repository opening to load any additional
847 This is called during repository opening to load any additional
849 config files or settings relevant to the current repository.
848 config files or settings relevant to the current repository.
850
849
851 Returns a bool indicating whether any additional configs were loaded.
850 Returns a bool indicating whether any additional configs were loaded.
852
851
853 Extensions should monkeypatch this function to modify how per-repo
852 Extensions should monkeypatch this function to modify how per-repo
854 configs are loaded. For example, an extension may wish to pull in
853 configs are loaded. For example, an extension may wish to pull in
855 configs from alternate files or sources.
854 configs from alternate files or sources.
856
855
857 sharedvfs is vfs object pointing to source repo if the current one is a
856 sharedvfs is vfs object pointing to source repo if the current one is a
858 shared one
857 shared one
859 """
858 """
860 if not rcutil.use_repo_hgrc():
859 if not rcutil.use_repo_hgrc():
861 return False
860 return False
862
861
863 ret = False
862 ret = False
864 # first load config from shared source if we has to
863 # first load config from shared source if we has to
865 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
864 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
866 try:
865 try:
867 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
866 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
868 ret = True
867 ret = True
869 except IOError:
868 except IOError:
870 pass
869 pass
871
870
872 try:
871 try:
873 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
872 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
874 ret = True
873 ret = True
875 except IOError:
874 except IOError:
876 pass
875 pass
877
876
878 try:
877 try:
879 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
878 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
880 ret = True
879 ret = True
881 except IOError:
880 except IOError:
882 pass
881 pass
883
882
884 return ret
883 return ret
885
884
886
885
887 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
886 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
888 """Perform additional actions after .hg/hgrc is loaded.
887 """Perform additional actions after .hg/hgrc is loaded.
889
888
890 This function is called during repository loading immediately after
889 This function is called during repository loading immediately after
891 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
890 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
892
891
893 The function can be used to validate configs, automatically add
892 The function can be used to validate configs, automatically add
894 options (including extensions) based on requirements, etc.
893 options (including extensions) based on requirements, etc.
895 """
894 """
896
895
897 # Map of requirements to list of extensions to load automatically when
896 # Map of requirements to list of extensions to load automatically when
898 # requirement is present.
897 # requirement is present.
899 autoextensions = {
898 autoextensions = {
900 b'git': [b'git'],
899 b'git': [b'git'],
901 b'largefiles': [b'largefiles'],
900 b'largefiles': [b'largefiles'],
902 b'lfs': [b'lfs'],
901 b'lfs': [b'lfs'],
903 }
902 }
904
903
905 for requirement, names in sorted(autoextensions.items()):
904 for requirement, names in sorted(autoextensions.items()):
906 if requirement not in requirements:
905 if requirement not in requirements:
907 continue
906 continue
908
907
909 for name in names:
908 for name in names:
910 if not ui.hasconfig(b'extensions', name):
909 if not ui.hasconfig(b'extensions', name):
911 ui.setconfig(b'extensions', name, b'', source=b'autoload')
910 ui.setconfig(b'extensions', name, b'', source=b'autoload')
912
911
913
912
914 def gathersupportedrequirements(ui):
913 def gathersupportedrequirements(ui):
915 """Determine the complete set of recognized requirements."""
914 """Determine the complete set of recognized requirements."""
916 # Start with all requirements supported by this file.
915 # Start with all requirements supported by this file.
917 supported = set(localrepository._basesupported)
916 supported = set(localrepository._basesupported)
918
917
919 # Execute ``featuresetupfuncs`` entries if they belong to an extension
918 # Execute ``featuresetupfuncs`` entries if they belong to an extension
920 # relevant to this ui instance.
919 # relevant to this ui instance.
921 modules = {m.__name__ for n, m in extensions.extensions(ui)}
920 modules = {m.__name__ for n, m in extensions.extensions(ui)}
922
921
923 for fn in featuresetupfuncs:
922 for fn in featuresetupfuncs:
924 if fn.__module__ in modules:
923 if fn.__module__ in modules:
925 fn(ui, supported)
924 fn(ui, supported)
926
925
927 # Add derived requirements from registered compression engines.
926 # Add derived requirements from registered compression engines.
928 for name in util.compengines:
927 for name in util.compengines:
929 engine = util.compengines[name]
928 engine = util.compengines[name]
930 if engine.available() and engine.revlogheader():
929 if engine.available() and engine.revlogheader():
931 supported.add(b'exp-compression-%s' % name)
930 supported.add(b'exp-compression-%s' % name)
932 if engine.name() == b'zstd':
931 if engine.name() == b'zstd':
933 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
932 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
934
933
935 return supported
934 return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
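

# Illustrative sketch (hypothetical requirement name): feeding the validator
# a requirement this Mercurial does not know about raises and points the
# user at the wiki page referenced above:
#
#   ensurerequirementsrecognized({b'fancy-future-format'}, supported)
#   # -> error.RequirementError: repository requires features unknown to
#   #    this Mercurial: fancy-future-format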


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
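

# Illustrative decision table (derived from makestore() above, with
# hypothetical requirement sets): the store flavour follows from which
# requirements are present:
#
#   {b'store', b'fncache', b'dotencode'} -> fncachestore (dotencode=True)
#   {b'store', b'fncache'}               -> fncachestore (dotencode=False)
#   {b'store'}                           -> encodedstore
#   set()                                -> basicstore (very old repos)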


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
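

# Illustrative sketch (plain Python, not part of the module): the engine
# name is the third `-`-separated field of the requirement, so both
# requirement spellings resolve the same way:
#
#   >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
#   b'zstd'
#   >>> b'exp-compression-zlib'.split(b'-', 2)[2]
#   b'zlib'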


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
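

# Illustrative sketch (simplified; the actual composition happens in
# ``makelocalrepository()``, defined elsewhere in this module): each factory
# contributes one base class, and the final repository type is derived
# dynamically, roughly like:
#
#   bases = [fn()(requirements=requirements, features=features)
#            for _iface, fn in REPO_INTERFACES]
#   cls = type('derivedrepo', tuple(bases), {})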


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
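
    # Illustrative sketch (simplified): both wards wrap a vfs ``audit``
    # callable and only add developer warnings, leaving the original
    # behavior intact:
    #
    #   audit = repo.vfs.audit                      # original callable
    #   repo.vfs.audit = repo._getvfsward(audit)    # wrapped
    #   repo.vfs.audit(b'journal.foo', mode=b'w')   # develwarn without 'lock'
    #
    # This is what __init__ does when 'devel.check-locks' is enabled.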

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since
        # it panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
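
    # Illustrative walk-through (hypothetical paths): for
    # path = self.root + b'/sub/dir/f' with b'sub' in ctx.substate, the loop
    # above tries the prefixes b'sub/dir/f', b'sub/dir', then b'sub'; on the
    # match it delegates to the subrepo: ctx.sub(b'sub').checknested(b'dir/f').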

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
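
    # Illustrative usage sketch (hypothetical caller): filtering always
    # starts from the unfiltered repo, so chaining views does not nest:
    #
    #   served = repo.filtered(b'served')
    #   visible = served.filtered(b'visible')  # plain 'visible' view, not a
    #                                          # 'visible'-of-'served' view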
1645
1644
1646 @mixedrepostorecache(
1645 @mixedrepostorecache(
1647 (b'bookmarks', b'plain'),
1646 (b'bookmarks', b'plain'),
1648 (b'bookmarks.current', b'plain'),
1647 (b'bookmarks.current', b'plain'),
1649 (b'bookmarks', b''),
1648 (b'bookmarks', b''),
1650 (b'00changelog.i', b''),
1649 (b'00changelog.i', b''),
1651 )
1650 )
1652 def _bookmarks(self):
1651 def _bookmarks(self):
1653 # Since the multiple files involved in the transaction cannot be
1652 # Since the multiple files involved in the transaction cannot be
1654 # written atomically (with current repository format), there is a race
1653 # written atomically (with current repository format), there is a race
1655 # condition here.
1654 # condition here.
1656 #
1655 #
1657 # 1) changelog content A is read
1656 # 1) changelog content A is read
1658 # 2) outside transaction update changelog to content B
1657 # 2) outside transaction update changelog to content B
1659 # 3) outside transaction update bookmark file referring to content B
1658 # 3) outside transaction update bookmark file referring to content B
1660 # 4) bookmarks file content is read and filtered against changelog-A
1659 # 4) bookmarks file content is read and filtered against changelog-A
1661 #
1660 #
1662 # When this happens, bookmarks against nodes missing from A are dropped.
1661 # When this happens, bookmarks against nodes missing from A are dropped.
1663 #
1662 #
1664 # Having this happening during read is not great, but it become worse
1663 # Having this happening during read is not great, but it become worse
1665 # when this happen during write because the bookmarks to the "unknown"
1664 # when this happen during write because the bookmarks to the "unknown"
1666 # nodes will be dropped for good. However, writes happen within locks.
1665 # nodes will be dropped for good. However, writes happen within locks.
1667 # This locking makes it possible to have a race free consistent read.
1666 # This locking makes it possible to have a race free consistent read.
1668 # For this purpose data read from disc before locking are
1667 # For this purpose data read from disc before locking are
1669 # "invalidated" right after the locks are taken. This invalidations are
1668 # "invalidated" right after the locks are taken. This invalidations are
1670 # "light", the `filecache` mechanism keep the data in memory and will
1669 # "light", the `filecache` mechanism keep the data in memory and will
1671 # reuse them if the underlying files did not changed. Not parsing the
1670 # reuse them if the underlying files did not changed. Not parsing the
1672 # same data multiple times helps performances.
1671 # same data multiple times helps performances.
1673 #
1672 #
1674 # Unfortunately in the case describe above, the files tracked by the
1673 # Unfortunately in the case describe above, the files tracked by the
1675 # bookmarks file cache might not have changed, but the in-memory
1674 # bookmarks file cache might not have changed, but the in-memory
1676 # content is still "wrong" because we used an older changelog content
1675 # content is still "wrong" because we used an older changelog content
1677 # to process the on-disk data. So after locking, the changelog would be
1676 # to process the on-disk data. So after locking, the changelog would be
1678 # refreshed but `_bookmarks` would be preserved.
1677 # refreshed but `_bookmarks` would be preserved.
1679 # Adding `00changelog.i` to the list of tracked file is not
1678 # Adding `00changelog.i` to the list of tracked file is not
1680 # enough, because at the time we build the content for `_bookmarks` in
1679 # enough, because at the time we build the content for `_bookmarks` in
1681 # (4), the changelog file has already diverged from the content used
1680 # (4), the changelog file has already diverged from the content used
1682 # for loading `changelog` in (1)
1681 # for loading `changelog` in (1)
1683 #
1682 #
1684 # To prevent the issue, we force the changelog to be explicitly
1683 # To prevent the issue, we force the changelog to be explicitly
1685 # reloaded while computing `_bookmarks`. The data race can still happen
1684 # reloaded while computing `_bookmarks`. The data race can still happen
1686 # without the lock (with a narrower window), but it would no longer go
1685 # without the lock (with a narrower window), but it would no longer go
1687 # undetected during the lock time refresh.
1686 # undetected during the lock time refresh.
1688 #
1687 #
1689 # The new schedule is as follow
1688 # The new schedule is as follow
1690 #
1689 #
1691 # 1) filecache logic detect that `_bookmarks` needs to be computed
1690 # 1) filecache logic detect that `_bookmarks` needs to be computed
1692 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1691 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1693 # 3) We force `changelog` filecache to be tested
1692 # 3) We force `changelog` filecache to be tested
1694 # 4) cachestat for `changelog` are captured (for changelog)
1693 # 4) cachestat for `changelog` are captured (for changelog)
1695 # 5) `_bookmarks` is computed and cached
1694 # 5) `_bookmarks` is computed and cached
1696 #
1695 #
1697 # The step in (3) ensure we have a changelog at least as recent as the
1696 # The step in (3) ensure we have a changelog at least as recent as the
1698 # cache stat computed in (1). As a result at locking time:
1697 # cache stat computed in (1). As a result at locking time:
1699 # * if the changelog did not changed since (1) -> we can reuse the data
1698 # * if the changelog did not changed since (1) -> we can reuse the data
1700 # * otherwise -> the bookmarks get refreshed.
1699 # * otherwise -> the bookmarks get refreshed.
1701 self._refreshchangelog()
1700 self._refreshchangelog()
1702 return bookmarks.bmstore(self)
1701 return bookmarks.bmstore(self)
1703
1702
1704 def _refreshchangelog(self):
1703 def _refreshchangelog(self):
1705 """make sure the in memory changelog match the on-disk one"""
1704 """make sure the in memory changelog match the on-disk one"""
1706 if 'changelog' in vars(self) and self.currenttransaction() is None:
1705 if 'changelog' in vars(self) and self.currenttransaction() is None:
1707 del self.changelog
1706 del self.changelog
1708
1707
1709 @property
1708 @property
1710 def _activebookmark(self):
1709 def _activebookmark(self):
1711 return self._bookmarks.active
1710 return self._bookmarks.active
1712
1711
1713 # _phasesets depend on changelog. what we need is to call
1712 # _phasesets depend on changelog. what we need is to call
1714 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1713 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1715 # can't be easily expressed in filecache mechanism.
1714 # can't be easily expressed in filecache mechanism.
1716 @storecache(b'phaseroots', b'00changelog.i')
1715 @storecache(b'phaseroots', b'00changelog.i')
1717 def _phasecache(self):
1716 def _phasecache(self):
1718 return phases.phasecache(self, self._phasedefaults)
1717 return phases.phasecache(self, self._phasedefaults)
1719
1718
1720 @storecache(b'obsstore')
1719 @storecache(b'obsstore')
1721 def obsstore(self):
1720 def obsstore(self):
1722 return obsolete.makestore(self.ui, self)
1721 return obsolete.makestore(self.ui, self)
1723
1722
    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

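    # Illustrative usage (a sketch; assumes a narrow clone whose narrowspec
    # includes only b'dir/'):
    #
    #   m = repo.narrowmatch()
    #   m(b'dir/file.txt')    # -> True, the path is inside the narrowspec
    #   m(b'other/file.txt')  # -> False, the path is outside the narrowspec
    #
    #   # intersect a caller-supplied matcher with the narrowspec:
    #   m = repo.narrowmatch(matchmod.match(repo.root, b'', [b'glob:**.py']))
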
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

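    # Illustrative lookups (a sketch of the changeid forms handled above;
    # `fortyhex` stands for any 40-character hex node id):
    #
    #   repo[None]      # working directory context
    #   repo[b'.']      # first parent of the working directory
    #   repo[b'tip']    # tip of the (possibly filtered) changelog
    #   repo[0]         # integer revision number
    #   repo[fortyhex]  # hex node id (binary node ids work too)
    #   repo[0:3]       # list of contexts, skipping filtered revisions
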
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

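    # Illustrative %-formatting (a sketch; see ``revsetlang.formatspec`` for
    # the full list of escapes):
    #
    #   repo.revs(b'heads(%d::)', rev)      # %d escapes an integer revision
    #   repo.revs(b'ancestors(%ld)', revs)  # %ld escapes a list of revisions
    #   repo.revs(b'branch(%s)', name)      # %s escapes a bytes string
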
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

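    # Illustrative usage (a sketch; b'releases' is a hypothetical alias):
    #
    #   repo.anyrevs(
    #       [b'releases', b'heads(.)'],
    #       user=True,
    #       localalias={b'releases': b'tag() and public()'},
    #   )
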
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

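    # Illustrative usage (a sketch):
    #
    #   repo.tags()            # {b'tip': node, b'v1.0': node, ...}
    #   repo.tagtype(b'v1.0')  # b'global' (.hgtags) or b'local' (localtags)
    #   repo.tagtype(b'gone')  # None: no such tag
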
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

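    # Illustrative usage (a sketch; the second node is made up, so it is
    # reported as unknown):
    #
    #   bogus = hashutil.sha1(b'no such node').digest()
    #   repo.known([repo[b'tip'].node(), bogus])  # -> [True, False]
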
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

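    # Illustrative configuration consumed by _loadfilter (a sketch of the
    # [encode]/[decode] hgrc sections; the gzip example is purely
    # illustrative):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
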
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

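    # Illustrative usage (a sketch): the flags select how the file
    # materializes in the working directory.
    #
    #   repo.wwrite(b'script.sh', data, b'x')   # regular file, exec bit set
    #   repo.wwrite(b'link', b'target', b'l')   # symlink pointing at 'target'
    #   repo.wwrite(b'plain.txt', data, b'')    # plain, non-executable file
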
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

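        # Illustrative reader for the file documented above (a sketch; a real
        # hook should check HG_TAG_MOVED before trusting the file):
        #
        #   with repo.vfs(b'changes/tags.changes', b'rb') as fp:
        #       for line in fp:
        #           action, hexnode, tag = line.rstrip(b'\n').split(b' ', 2)
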
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

2588 tr.addfinalize(b'txnclose-hook', txnclosehook)
2587 tr.addfinalize(b'txnclose-hook', txnclosehook)
2589 # Include a leading "-" to make it happen before the transaction summary
2588 # Include a leading "-" to make it happen before the transaction summary
2590 # reports registered via scmutil.registersummarycallback() whose names
2589 # reports registered via scmutil.registersummarycallback() whose names
2591 # are 00-txnreport etc. That way, the caches will be warm when the
2590 # are 00-txnreport etc. That way, the caches will be warm when the
2592 # callbacks run.
2591 # callbacks run.
2593 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2592 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2594
2593
2595 def txnaborthook(tr2):
2594 def txnaborthook(tr2):
2596 """To be run if transaction is aborted"""
2595 """To be run if transaction is aborted"""
2597 repo = reporef()
2596 repo = reporef()
2598 assert repo is not None # help pytype
2597 assert repo is not None # help pytype
2599 repo.hook(
2598 repo.hook(
2600 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2599 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2601 )
2600 )
2602
2601
2603 tr.addabort(b'txnabort-hook', txnaborthook)
2602 tr.addabort(b'txnabort-hook', txnaborthook)
2604 # avoid eager cache invalidation. in-memory data should be identical
2603 # avoid eager cache invalidation. in-memory data should be identical
2605 # to stored data if transaction has no error.
2604 # to stored data if transaction has no error.
2606 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2605 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2607 self._transref = weakref.ref(tr)
2606 self._transref = weakref.ref(tr)
2608 scmutil.registersummarycallback(self, tr, desc)
2607 scmutil.registersummarycallback(self, tr, desc)
2609 return tr
2608 return tr
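
    # Usage sketch: a typical caller pairs the store lock with the
    # transaction and relies on the context-manager protocol to close or
    # abort it, as commit() below does:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-change') as tr:
    #             ...  # mutate the store; the hooks above fire on close
    #
    # b'my-change' is a placeholder description, not a value used anywhere
    # in this module.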

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
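
    # Note: undoname() (a module-level helper defined elsewhere in this
    # file) is expected to map each b'journal.*' name to its b'undo.*'
    # counterpart; those undo files are what rollback()/_rollback() below
    # read back.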

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
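
    # The b"journal.desc" payload above is b"%d\n%s\n": the current
    # repository length followed by the transaction description. Once the
    # journal is renamed to b'undo.*', _rollback() below splits the file
    # back into (oldlen, desc), plus an optional third detail line that
    # other writers may append.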

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming is performed after a clone and some of the
        slower caches may be skipped, namely the `.fnodetags` one. This
        argument is 5.8 specific as we plan for a cleaner way to deal with
        this in 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed writes.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
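
        # Usage sketch: a caller that wants everything warmed explicitly
        # would pass the constant from the interface module, e.g.
        #
        #     repo.updatecaches(caches=repository.CACHES_ALL)
        #
        # mirroring what the deprecated `full=True` path above selects.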

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l
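
    # Ordering sketch: callers that need both locks take them as
    #
    #     with repo.wlock(), repo.lock():
    #         ...
    #
    # (wlock first), exactly as commit() below does; see the lock()/wlock()
    # docstrings for the dead-lock rationale.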
3018
3017
3019 def wlock(self, wait=True):
3018 def wlock(self, wait=True):
3020 """Lock the non-store parts of the repository (everything under
3019 """Lock the non-store parts of the repository (everything under
3021 .hg except .hg/store) and return a weak reference to the lock.
3020 .hg except .hg/store) and return a weak reference to the lock.
3022
3021
3023 Use this before modifying files in .hg.
3022 Use this before modifying files in .hg.
3024
3023
3025 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3024 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3026 'wlock' first to avoid a dead-lock hazard."""
3025 'wlock' first to avoid a dead-lock hazard."""
3027 l = self._wlockref() if self._wlockref else None
3026 l = self._wlockref() if self._wlockref else None
3028 if l is not None and l.held:
3027 if l is not None and l.held:
3029 l.lock()
3028 l.lock()
3030 return l
3029 return l
3031
3030
3032 # We do not need to check for non-waiting lock acquisition. Such
3031 # We do not need to check for non-waiting lock acquisition. Such
3033 # acquisition would not cause dead-lock as they would just fail.
3032 # acquisition would not cause dead-lock as they would just fail.
3034 if wait and (
3033 if wait and (
3035 self.ui.configbool(b'devel', b'all-warnings')
3034 self.ui.configbool(b'devel', b'all-warnings')
3036 or self.ui.configbool(b'devel', b'check-locks')
3035 or self.ui.configbool(b'devel', b'check-locks')
3037 ):
3036 ):
3038 if self._currentlock(self._lockref) is not None:
3037 if self._currentlock(self._lockref) is not None:
3039 self.ui.develwarn(b'"wlock" acquired after "lock"')
3038 self.ui.develwarn(b'"wlock" acquired after "lock"')
3040
3039
3041 def unlock():
3040 def unlock():
3042 if self.dirstate.pendingparentchange():
3041 if self.dirstate.pendingparentchange():
3043 self.dirstate.invalidate()
3042 self.dirstate.invalidate()
3044 else:
3043 else:
3045 self.dirstate.write(None)
3044 self.dirstate.write(None)
3046
3045
3047 self._filecache[b'dirstate'].refresh()
3046 self._filecache[b'dirstate'].refresh()
3048
3047
3049 l = self._lock(
3048 l = self._lock(
3050 self.vfs,
3049 self.vfs,
3051 b"wlock",
3050 b"wlock",
3052 wait,
3051 wait,
3053 unlock,
3052 unlock,
3054 self.invalidatedirstate,
3053 self.invalidatedirstate,
3055 _(b'working directory of %s') % self.origroot,
3054 _(b'working directory of %s') % self.origroot,
3056 )
3055 )
3057 self._wlockref = weakref.ref(l)
3056 self._wlockref = weakref.ref(l)
3058 return l
3057 return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit):
                # the temporary commit may have been stripped before the hook
                # is released
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock) or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
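
    # branches() above walks first parents from each starting node until it
    # reaches a merge (p[1] != nullid) or a root (p[0] == nullid), so every
    # tuple it yields is (starting node, end of that linear run, and the
    # two parents of that end node).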

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
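
    # between() samples the first-parent chain from `top` toward `bottom`:
    # since `f` doubles on each hit of `i == f`, nodes are recorded at
    # distances 1, 2, 4, 8, ... (powers of two) from `top`, keeping each
    # answer logarithmic in the length of the chain. This presumably serves
    # the legacy 'between' wire-protocol command.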

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks which are passed a pushop
        (carrying repo, remote and outgoing information) and called before
        pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
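
    # Hypothetical walk-through: a bookmark push arrives as roughly
    # pushkey(b'bookmarks', b'@', old_hex, new_hex); 'prepushkey' may veto
    # it, pushkey.push() applies it, and the 'pushkey' hook is deferred via
    # _afterlock() until the outermost lock is released.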

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])
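    # The returned path (normally .hg/last-message.txt) is what commands such
    # as `hg commit` print so that an aborted commit message can be recovered.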

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
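    # A registration sketch. The computer signature assumed here,
    # (repo, store, rev, prev_sidedata) -> (sidedata, (add_flags, drop_flags)),
    # mirrors the test extensions; the category is hypothetical:
    #
    #   def compute(repo, store, rev, prev_sidedata):
    #       return {sidedatamod.SD_TEST1: b'data'}, (0, 0)
    #
    #   repo.register_sidedata_computer(
    #       revlogconst.KIND_CHANGELOG,
    #       b'test1',
    #       (sidedatamod.SD_TEST1,),
    #       compute,
    #       0,
    #   )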


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a
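# Usage sketch: the closure returned by aftertrans() is handed to the
# transaction machinery as its ``after`` callback, e.g.
#
#   tr = transaction.transaction(
#       report, vfs, vfsmap, b"journal", b"undo", aftertrans(renames)
#   )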


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
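# For example (with a POSIX-style byte path):
#
#   undoname(b'.hg/store/journal.phaseroots')
#   # -> b'.hg/store/undo.phaseroots'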


def instance(ui, path, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
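# With stock configuration this returns {b'backend': b'revlogv1'} (a sketch;
# the value simply follows the storage.new-repo-backend config).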


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
    if ui.configbool(b'format', b'usefncache'):
        requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
    if ui.configbool(b'format', b'dotencode'):
        requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )
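    # NB: the for/else above relies on Python's loop-else semantics: the
    # Abort is raised only when no configured engine was usable (the loop
    # finished without ``break``); otherwise ``compengine`` remains bound to
    # the engine that matched, which the checks below rely on.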

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository; we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control over the working copy only, so "copy" the
            # non-working-copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)
    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint.version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
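
# With everything left at stock defaults, newreporequirements() typically
# returns something like (a sketch; the exact contents track the config and
# the Mercurial version):
#
#   {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#    b'sparserevlog', ...}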


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
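# For example, an unrecognized key is passed straight through:
#
#   filterknowncreateopts(ui, {b'backend': b'revlogv1', b'bogus': True})
#   # -> {b'bogus': True}, which createrepository() below then rejects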


def createrepository(ui, path, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
    (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store's requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
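
# A minimal end-to-end sketch (the path is illustrative):
#
#   createrepository(ui, b'/path/to/newrepo')
#   repo = instance(ui, b'/path/to/newrepo', create=False)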


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups raise an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)